When installing missing pages (or zapping them), Rust Binder will look up
the vma in the mm by address, and then call vm_insert_page() (or
zap_page_range_single()). However, if the vma is closed and replaced with a
different vma at the same address, this can lead to Rust Binder installing
pages into the wrong vma. If the page is installed into a writable vma, the
process gains the ability to write to its own binder pages, which are
normally read-only. Although you're not supposed to be able to write to
those pages, the intent behind the design of Rust Binder is that even if
you do get that ability, it should not lead to anything bad. Unfortunately,
due to another bug, that is not the case.

To fix this, store a pointer in vm_private_data and check that the vma
returned by vma_lookup() has the right vm_ops and vm_private_data before
trying to use it. This should ensure that Rust Binder refuses to interact
with any other VMA. The plan is to introduce more vma abstractions to avoid
this unsafe access to vm_ops and vm_private_data, but for now let's start
with the simplest possible fix.

C Binder performs the same check in a slightly different way: it provides a
vm_ops->close callback that sets a boolean to true, then checks that
boolean after calling vma_lookup(). That approach is more fragile than the
one in this patch. (We probably still want to do both, but the
vm_ops->close callback will be added later as part of the follow-up vma API
changes.)

It's still possible to remap the vma so that pages appear in the right vma
but at the wrong offset; that is a separate issue and will be fixed when
Rust Binder gets a vm_ops->close callback.

Cc: stable@vger.kernel.org
Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
Reported-by: Jann Horn
Reviewed-by: Jann Horn
Signed-off-by: Alice Ryhl
---
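Note for reviewers: the failure mode is easiest to see from userspace. A
minimal sketch of the sequence, assuming an existing read-only binder
mapping described by `addr`/`len` (illustration only, not a working
reproducer; the helper name is made up):

    use libc::{MAP_ANONYMOUS, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_READ, PROT_WRITE};

    unsafe fn replace_binder_mapping(addr: *mut libc::c_void, len: usize) {
        // Tear down the read-only binder vma...
        libc::munmap(addr, len);

        // ...and place a writable anonymous vma at the exact same address.
        let p = libc::mmap(
            addr,
            len,
            PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
            -1,
            0,
        );
        assert!(p != MAP_FAILED);

        // Any page install or zap triggered after this point used to hit
        // the new vma, since vma_lookup() matches by address alone. With
        // this patch, check_vma() sees foreign vm_ops/vm_private_data and
        // refuses to use the vma.
    }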
 drivers/android/binder/page_range.rs | 83 +++++++++++++++++++++++++++---------
 1 file changed, 63 insertions(+), 20 deletions(-)

diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c8b2341e498dc3567b659f05e3fd7..67aae783e8b8b7cf60ecf7e711d5f6f6f5d1dbe3 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
     _pin: PhantomPinned,
 }
 
+// We do not define any ops. For now, used only to check identity of vmas.
+static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
+
+// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
+// check its vm_ops and private data before using it.
+fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
+    // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
+    let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
+    if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
+        return None;
+    }
+
+    // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
+    let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
+    // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
+    // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
+    // VMA associated with it, so there can't be any false positives due to pointer reuse here.
+    if !ptr::eq(vm_private_data, owner.cast()) {
+        return None;
+    }
+
+    vma.as_mixedmap_vma()
+}
+
 struct Inner {
     /// Array of pages.
     ///
@@ -308,6 +332,18 @@ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
         inner.size = num_pages;
         inner.vma_addr = vma.start();
 
+        // This pointer is only used for comparison - it's not dereferenced.
+        //
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_private_data`.
+        unsafe {
+            (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
+        };
+
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_ops`.
+        unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
+
         Ok(num_pages)
     }
 
@@ -399,22 +435,24 @@ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
         //
         // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
         // workqueue.
-        MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
-            .mmap_read_lock()
-            .vma_lookup(vma_addr)
-            .ok_or(ESRCH)?
-            .as_mixedmap_vma()
-            .ok_or(ESRCH)?
-            .vm_insert_page(user_page_addr, &new_page)
-            .inspect_err(|err| {
-                pr_warn!(
-                    "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
-                    user_page_addr,
-                    vma_addr,
-                    i,
-                    err
-                )
-            })?;
+        check_vma(
+            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+                .mmap_read_lock()
+                .vma_lookup(vma_addr)
+                .ok_or(ESRCH)?,
+            self,
+        )
+        .ok_or(ESRCH)?
+        .vm_insert_page(user_page_addr, &new_page)
+        .inspect_err(|err| {
+            pr_warn!(
+                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+                user_page_addr,
+                vma_addr,
+                i,
+                err
+            )
+        })?;
 
         let inner = self.lock.lock();
 
@@ -667,12 +705,15 @@ fn drop(self: Pin<&mut Self>) {
         let mmap_read;
         let mm_mutex;
         let vma_addr;
+        let range_ptr;
 
         {
             // CAST: The `list_head` field is first in `PageInfo`.
             let info = item as *mut PageInfo;
             // SAFETY: The `range` field of `PageInfo` is immutable.
-            let range = unsafe { &*((*info).range) };
+            range_ptr = unsafe { (*info).range };
+            // SAFETY: The `range` outlives its `PageInfo` values.
+            let range = unsafe { &*range_ptr };
 
             mm = match range.mm.mmget_not_zero() {
                 Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +758,11 @@ fn drop(self: Pin<&mut Self>) {
         // SAFETY: The lru lock is locked when this method is called.
         unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
 
-        if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
-            let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
-            vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+        if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
+            if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
+                let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+                vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+            }
         }
 
         drop(mmap_read);
-- 
2.53.0.310.g728cabbaf7-goog


When sending a transaction, its offsets array is first copied into the
target proc's vma, and then the values are read back from there. This is
normally fine because the vma is a read-only mapping, so the target process
cannot change the values under us. However, if the target process somehow
gains the ability to write to its own vma, it could change an offset before
it is read back, causing the kernel to misinterpret what the sender meant.
If the sender happens to send a payload with a specific shape, this could
in the worst case allow the receiver to escalate privileges into the
sender.

The intent is that gaining the ability to change your own process's
read-only vma should not be exploitable, so remove this TOCTOU read even
though it is unexploitable without another Binder bug.

Cc: stable@vger.kernel.org
Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
Reported-by: Jann Horn
Reviewed-by: Jann Horn
Signed-off-by: Alice Ryhl
---
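The shape of the bug, reduced to a standalone sketch (none of this is
driver code; the atomic merely stands in for memory that the other side can
modify concurrently):

    use core::sync::atomic::{AtomicU64, Ordering};

    // Buggy shape: fetch once to validate, fetch again to use. A writer
    // flipping `shared` between the two loads defeats the validation.
    fn racy(shared: &AtomicU64) -> Option<u64> {
        if shared.load(Ordering::Relaxed) > 100 {
            return None;
        }
        // May no longer be the value that was checked above.
        Some(shared.load(Ordering::Relaxed))
    }

    // Fixed shape, as in this patch: fetch exactly once, then validate and
    // use the same snapshot.
    fn fixed(shared: &AtomicU64) -> Option<u64> {
        let v = shared.load(Ordering::Relaxed);
        if v > 100 {
            return None;
        }
        Some(v)
    }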
 drivers/android/binder/thread.rs | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 1f1709a6a77abc1c865cc9387e7ba7493448c71d..a81910f4cedf9bf485bf1cf954b95aee6c122cfd 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -1016,12 +1016,9 @@ pub(crate) fn copy_transaction_data(
 
         // Copy offsets if there are any.
         if offsets_size > 0 {
-            {
-                let mut reader =
-                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
-                        .reader();
-                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
-            }
+            let mut offsets_reader =
+                UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+                    .reader();
 
             let offsets_start = aligned_data_size;
             let offsets_end = aligned_data_size + offsets_size;
@@ -1042,11 +1039,9 @@ pub(crate) fn copy_transaction_data(
                 .step_by(size_of::<u64>())
                 .enumerate()
             {
-                let offset: usize = view
-                    .alloc
-                    .read::<u64>(index_offset)?
-                    .try_into()
-                    .map_err(|_| EINVAL)?;
+                let offset = offsets_reader.read::<u64>()?;
+                view.alloc.write(index_offset, &offset)?;
+                let offset: usize = offset.try_into().map_err(|_| EINVAL)?;
 
                 if offset < end_of_previous_object || !is_aligned(offset, size_of::<usize>()) {
                     pr_warn!("Got transaction with invalid offset.");
-- 
2.53.0.310.g728cabbaf7-goog
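A closing note on the follow-up mentioned in the first patch: C Binder's
defense is a vm_ops->close callback that latches a flag, which is then
consulted after vma_lookup(). A rough Rust rendering of that shape (an
assumed sketch against raw bindings; the real follow-up will go through
proper vma abstractions, and the flag would live in per-process state
rather than a static):

    use core::sync::atomic::{AtomicBool, Ordering};

    // Sketch only: in real code this would be a field of the per-process
    // state, not a global.
    static VMA_CLOSED: AtomicBool = AtomicBool::new(false);

    unsafe extern "C" fn binder_vma_close(_vma: *mut bindings::vm_area_struct) {
        // The original mapping is going away; remember that fact.
        VMA_CLOSED.store(true, Ordering::Relaxed);
    }

    // After vma_lookup(), refuse to touch whatever vma now occupies the
    // address if the original mapping was ever closed.
    fn vma_still_ours() -> bool {
        !VMA_CLOSED.load(Ordering::Relaxed)
    }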