From: junbing dai

During system suspend, processes in the TASK_INTERRUPTIBLE state get
forcibly awakened. By applying the TASK_FREEZABLE flag, we prevent these
unnecessary wakeups and reduce suspend/resume overhead.

Signed-off-by: junbing dai
---
 fs/eventpoll.c    | 2 +-
 fs/fuse/dev.c     | 2 +-
 fs/jbd2/journal.c | 2 +-
 fs/pipe.c         | 4 ++--
 fs/select.c       | 4 ++--
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0fbf5dfedb24..6020575bdbab 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -2094,7 +2094,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * the same lock on wakeup ep_poll_callback() side, so it
 		 * is safe to avoid an explicit barrier.
 		 */
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 
 		/*
 		 * Do the final check under the lock. ep_start/done_scan()
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e80cd8f2c049..b3dbd113e2e2 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1418,7 +1418,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 
-		err = wait_event_interruptible_exclusive(fiq->waitq,
+		err = wait_event_freezable_exclusive(fiq->waitq,
 				!fiq->connected || request_pending(fiq));
 		if (err)
 			return err;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index d480b94117cd..a6ca1468ccfe 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -222,7 +222,7 @@ static int kjournald2(void *arg)
 		DEFINE_WAIT(wait);
 
 		prepare_to_wait(&journal->j_wait_commit, &wait,
-				TASK_INTERRUPTIBLE);
+				TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 		transaction = journal->j_running_transaction;
 		if (transaction == NULL ||
 		    time_before(jiffies, transaction->t_expires)) {
diff --git a/fs/pipe.c b/fs/pipe.c
index 45077c37bad1..a0e624fc734c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -385,7 +385,7 @@ anon_pipe_read(struct kiocb *iocb, struct iov_iter *to)
 		 * since we've done any required wakeups and there's no need
 		 * to mark anything accessed. And we've dropped the lock.
 		 */
-		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
+		if (wait_event_freezable_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
 			return -ERESTARTSYS;
 
 		wake_next_reader = true;
@@ -1098,7 +1098,7 @@ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
 	int cur = *cnt;
 
 	while (cur == *cnt) {
-		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 		pipe_unlock(pipe);
 		schedule();
 		finish_wait(&pipe->rd_wait, &rdwait);
diff --git a/fs/select.c b/fs/select.c
index 9fb650d03d52..0903a08b8067 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -600,7 +600,7 @@ static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec
 			to = &expire;
 		}
 
-		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
+		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
 					   to, slack))
 			timed_out = 1;
 	}
@@ -955,7 +955,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 			to = &expire;
 		}
 
-		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
+		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, to, slack))
 			timed_out = 1;
 	}
 	return count;
-- 
2.25.1