Most processes do not use Rust Binder with epoll, so avoid paying the
synchronize_rcu() cost in drop for those that don't need it. For those
that do, we also manage to replace synchronize_rcu() with kfree_rcu(),
though we introduce an extra allocation.

Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
 drivers/android/binder/process.rs |  2 +-
 drivers/android/binder/thread.rs  | 18 +++++++++++-------
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 132055b4790f0ec69a87635b498909df2bf475e2..9374f1a86766c09321b57e565b6317cc290ea32b 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -1684,7 +1684,7 @@ pub(crate) fn poll(
         table: PollTable<'_>,
     ) -> Result<u32> {
         let thread = this.get_current_thread()?;
-        let (from_proc, mut mask) = thread.poll(file, table);
+        let (from_proc, mut mask) = thread.poll(file, table)?;
         if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
             mask |= bindings::POLLIN;
         }
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 82264db06507d4641b60cbed96af482a9d36e7b2..8f09cf1599ae7edcf2ee60b2cb1b08cc2d0afd3f 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -16,7 +16,7 @@
     seq_file::SeqFile,
     seq_print,
     sync::atomic::{ordering::Relaxed, Atomic},
-    sync::poll::{PollCondVar, PollTable},
+    sync::poll::{PollTable, UpgradePollCondVar},
     sync::{Arc, SpinLock},
     task::Task,
     types::ARef,
@@ -412,7 +412,7 @@ pub(crate) struct Thread {
     #[pin]
    inner: SpinLock,
     #[pin]
-    work_condvar: PollCondVar,
+    work_condvar: UpgradePollCondVar,
     /// Used to insert this thread into the process' `ready_threads` list.
     ///
     /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
@@ -443,7 +443,7 @@ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
             process,
             task: ARef::from(&**kernel::current!()),
             inner <- kernel::new_spinlock!(inner, "Thread::inner"),
-            work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+            work_condvar <- kernel::new_upgrade_poll_condvar!("Thread::work_condvar"),
             links <- ListLinks::new(),
             links_track <- AtomicTracker::new(),
         }),
@@ -1484,10 +1484,15 @@ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result
         ret
     }
 
-    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
-        table.register_wait(file, &self.work_condvar);
+    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> Result<(bool, u32)> {
+        let condvar = self.work_condvar.poll(
+            &self.inner,
+            c"Thread::work_condvar (upgraded)",
+            kernel::static_lock_class!(),
+        )?;
+        table.register_wait(file, condvar);
         let mut inner = self.inner.lock();
-        (inner.should_use_process_work_queue(), inner.poll())
+        Ok((inner.should_use_process_work_queue(), inner.poll()))
     }
 
     /// Make the call to `get_work` or `get_work_local` return immediately, if any.
@@ -1523,7 +1528,6 @@ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
     pub(crate) fn release(self: &Arc<Self>) {
         self.inner.lock().is_dead = true;
 
-        //self.work_condvar.clear();
         self.unwind_transaction_stack();
 
         // Cancel all pending work items.
-- 
2.52.0.457.g6b5491de43-goog