When installing missing pages (or zapping them), Rust Binder will look up
the vma in the mm by address, and then call vm_insert_page (or
zap_page_range_single). However, if the vma is closed and replaced with a
different vma at the same address, this can lead to Rust Binder installing
pages into the wrong vma. By installing the page into a writable vma, it
becomes possible to write to your own binder pages, which are normally
read-only. Although you're not supposed to be able to write to those
pages, the intent behind the design of Rust Binder is that even if you get
that ability, it should not lead to anything bad. Unfortunately, due to
another bug, that is not the case.

To fix this, I will store a pointer in vm_private_data and check that the
vma returned by vma_lookup() has the right vm_ops and vm_private_data
before trying to use the vma. This should ensure that Rust Binder refuses
to interact with any other vma. I will follow up this patch with more vma
abstractions to avoid this unsafe access to vm_ops and vm_private_data,
but for now I'd like to start with the simplest possible fix.

C Binder performs the same check in a slightly different way: it provides
a vm_ops->close callback that sets a boolean to true, and then checks that
boolean after calling vma_lookup(). I think that approach is more fragile
than the solution in this patch. (We probably still want to do both, but
I'll add the vm_ops->close callback with the follow-up vma API changes.)

Cc: stable@vger.kernel.org
Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
 drivers/android/binder/page_range.rs | 78 +++++++++++++++++++++++++++---------
 1 file changed, 58 insertions(+), 20 deletions(-)

diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c8b2341e498dc3567b659f05e3fd7..90bab18961443c6e59699cb7345e41e0db80f0dd 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -142,6 +142,27 @@ pub(crate) struct ShrinkablePageRange {
     _pin: PhantomPinned,
 }
 
+// We do not define any ops. For now, used only to check identity of vmas.
+static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
+
+// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
+// check its vm_ops and private data before using it.
+fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
+    // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
+    let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
+    if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
+        return None;
+    }
+
+    // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
+    let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
+    if !ptr::eq(vm_private_data, owner.cast()) {
+        return None;
+    }
+
+    vma.as_mixedmap_vma()
+}
+
 struct Inner {
     /// Array of pages.
     ///
@@ -308,6 +329,16 @@ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
         inner.size = num_pages;
         inner.vma_addr = vma.start();
 
+        // This pointer is only used for comparison - it's not dereferenced.
+        //
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_private_data`.
+        unsafe { (*vma.as_ptr()).vm_private_data = self as *const Self as *mut c_void };
+
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_ops`.
+        unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
+
         Ok(num_pages)
     }
 
@@ -399,22 +430,24 @@ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
         //
         // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
         // workqueue.
-        MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
-            .mmap_read_lock()
-            .vma_lookup(vma_addr)
-            .ok_or(ESRCH)?
-            .as_mixedmap_vma()
-            .ok_or(ESRCH)?
-            .vm_insert_page(user_page_addr, &new_page)
-            .inspect_err(|err| {
-                pr_warn!(
-                    "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
-                    user_page_addr,
-                    vma_addr,
-                    i,
-                    err
-                )
-            })?;
+        check_vma(
+            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+                .mmap_read_lock()
+                .vma_lookup(vma_addr)
+                .ok_or(ESRCH)?,
+            self,
+        )
+        .ok_or(ESRCH)?
+        .vm_insert_page(user_page_addr, &new_page)
+        .inspect_err(|err| {
+            pr_warn!(
+                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+                user_page_addr,
+                vma_addr,
+                i,
+                err
+            )
+        })?;
 
         let inner = self.lock.lock();
 
@@ -667,12 +700,15 @@ fn drop(self: Pin<&mut Self>) {
         let mmap_read;
         let mm_mutex;
         let vma_addr;
+        let range_ptr;
         {
             // CAST: The `list_head` field is first in `PageInfo`.
             let info = item as *mut PageInfo;
 
             // SAFETY: The `range` field of `PageInfo` is immutable.
-            let range = unsafe { &*((*info).range) };
+            range_ptr = unsafe { (*info).range };
+            // SAFETY: The `range` outlives its `PageInfo` values.
+            let range = unsafe { &*range_ptr };
 
             mm = match range.mm.mmget_not_zero() {
                 Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +753,11 @@ fn drop(self: Pin<&mut Self>) {
         // SAFETY: The lru lock is locked when this method is called.
         unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
 
-        if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
-            let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
-            vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+        if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
+            if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
+                let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+                vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+            }
        }
 
         drop(mmap_read);
-- 
2.53.0.273.g2a3d683680-goog
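
As a side note for readers without the kernel tree at hand, here is a
minimal standalone userspace sketch of the identity check that check_vma()
performs above. It is not driver code; the FakeOps, FakeVma and Owner types
and their register()/check_vma() helpers are invented purely for
illustration. The idea is the same as in the patch: the owner stamps the
object with a known ops pointer and a pointer to itself, and later refuses
to touch any object whose two identity fields no longer compare equal via
ptr::eq().

  use core::ffi::c_void;
  use core::ptr;

  // Stand-in for vm_operations_struct: no callbacks, identity only.
  struct FakeOps {
      _close: Option<fn()>,
  }

  static MY_OPS: FakeOps = FakeOps { _close: None };

  // Stand-in for vm_area_struct: just the two fields the check reads.
  struct FakeVma {
      ops: *const FakeOps,
      private_data: *const c_void,
  }

  // Stand-in for ShrinkablePageRange.
  struct Owner {
      _num_pages: usize,
  }

  impl Owner {
      // Mirrors register_with_vma(): stamp the vma with our ops table and a
      // pointer to ourselves.
      fn register(&self, vma: &mut FakeVma) {
          vma.ops = &MY_OPS;
          vma.private_data = self as *const Owner as *const c_void;
      }
  }

  // Mirrors check_vma(): only hand the vma out if both identity fields match.
  fn check_vma<'a>(vma: &'a FakeVma, owner: *const Owner) -> Option<&'a FakeVma> {
      if !ptr::eq(vma.ops, &MY_OPS) {
          return None;
      }
      if !ptr::eq(vma.private_data, owner.cast::<c_void>()) {
          return None;
      }
      Some(vma)
  }

  fn main() {
      let owner = Owner { _num_pages: 4 };
      let mut vma = FakeVma { ops: ptr::null(), private_data: ptr::null() };

      // An unregistered vma is rejected, like a reused address range would be.
      assert!(check_vma(&vma, &owner).is_none());

      owner.register(&mut vma);
      assert!(check_vma(&vma, &owner).is_some());

      // A vma registered by a different owner is rejected as well.
      let other = Owner { _num_pages: 4 };
      assert!(check_vma(&vma, &other).is_none());

      println!("vma identity checks behave as expected");
  }

Rejecting on either mismatch is what makes a recycled mapping at the same
address harmless: a foreign vma has different vm_ops or different
vm_private_data, so it is simply treated as "not ours".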