In lbmRead(), the read I/O can complete before wait_event() goes to
sleep: lbmIODone() sets lbmDONE before it clears lbmREAD, so the wait
condition is already satisfied once lbmDONE is set. wait_event()
therefore returns while lbmIODone() is still running, and the caller
may release the lbuf before lbmIODone() has finished with it,
triggering the use-after-free reported in [1].

Fix this by moving the setting of lbmDONE to after lbmREAD has been
cleared, at the end of lbmIODone().

[1]
BUG: KASAN: slab-use-after-free in rt_spin_lock+0x88/0x3e0 kernel/locking/spinlock_rt.c:56
Call Trace:
 blk_update_request+0x57e/0xe60 block/blk-mq.c:1007
 blk_mq_end_request+0x3e/0x70 block/blk-mq.c:1169
 blk_complete_reqs block/blk-mq.c:1244 [inline]
 blk_done_softirq+0x10a/0x160 block/blk-mq.c:1249

Allocated by task 6101:
 lbmLogInit fs/jfs/jfs_logmgr.c:1821 [inline]
 lmLogInit+0x3d0/0x19e0 fs/jfs/jfs_logmgr.c:1269
 open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
 lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
 jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
 jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532

Freed by task 6101:
 kfree+0x1bd/0x900 mm/slub.c:6876
 lbmLogShutdown fs/jfs/jfs_logmgr.c:1864 [inline]
 lmLogInit+0x1137/0x19e0 fs/jfs/jfs_logmgr.c:1415
 open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
 lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
 jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
 jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532

Reported-by: syzbot+1d38eedcb25a3b5686a7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=1d38eedcb25a3b5686a7
Signed-off-by: Edward Adam Davis
---
v1 -> v2: fix potential deadlock

 fs/jfs/jfs_logmgr.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b343c5ea1159..0db4bc9f2d6c 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2180,8 +2180,6 @@ static void lbmIODone(struct bio *bio)
 
 	LCACHE_LOCK(flags);		/* disable+lock */
 
-	bp->l_flag |= lbmDONE;
-
 	if (bio->bi_status) {
 		bp->l_flag |= lbmERROR;
 
@@ -2196,12 +2194,10 @@ static void lbmIODone(struct bio *bio)
 	if (bp->l_flag & lbmREAD) {
 		bp->l_flag &= ~lbmREAD;
 
-		LCACHE_UNLOCK(flags);	/* unlock+enable */
-
 		/* wakeup I/O initiator */
 		LCACHE_WAKEUP(&bp->l_ioevent);
 
-		return;
+		goto out;
 	}
 
 	/*
@@ -2225,8 +2221,7 @@ static void lbmIODone(struct bio *bio)
 
 		if (bp->l_flag & lbmDIRECT) {
 			LCACHE_WAKEUP(&bp->l_ioevent);
-			LCACHE_UNLOCK(flags);
-			return;
+			goto out;
 		}
 
 		tail = log->wqueue;
@@ -2278,8 +2273,6 @@ static void lbmIODone(struct bio *bio)
 	 * leave buffer for i/o initiator to dispose
	 */
 	if (bp->l_flag & lbmSYNC) {
-		LCACHE_UNLOCK(flags);	/* unlock+enable */
-
 		/* wakeup I/O initiator */
 		LCACHE_WAKEUP(&bp->l_ioevent);
 	}
@@ -2290,6 +2283,7 @@ static void lbmIODone(struct bio *bio)
 	else if (bp->l_flag & lbmGC) {
 		LCACHE_UNLOCK(flags);
 		lmPostGC(bp);
+		LCACHE_LOCK(flags);		/* disable+lock */
 	}
 
 	/*
@@ -2302,9 +2296,11 @@ static void lbmIODone(struct bio *bio)
 		assert(bp->l_flag & lbmRELEASE);
 		assert(bp->l_flag & lbmFREE);
 		lbmfree(bp);
-
-		LCACHE_UNLOCK(flags);	/* unlock+enable */
 	}
+
+out:
+	bp->l_flag |= lbmDONE;
+	LCACHE_UNLOCK(flags);
 }
 
 int jfsIOWait(void *arg)
-- 
2.43.0
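
For illustration only, not part of the patch: below is a minimal
userspace model of the race, assuming the waiter in lbmRead() behaves
like wait_event(bp->l_ioevent, bp->l_flag != lbmREAD) as the
description above implies. The lbuf/lbmREAD/lbmDONE names mirror the
JFS identifiers, but the locking and wait primitives are simplified
pthread stand-ins, so this is a sketch of the ordering problem rather
than the kernel code.

/* Minimal model of the pre-patch ordering in lbmIODone(); NOT kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define lbmREAD 0x01
#define lbmDONE 0x02

struct lbuf {			/* simplified stand-in for the JFS log buffer */
	int l_flag;
	pthread_mutex_t lock;
	pthread_cond_t l_ioevent;
};

/* Models the buggy completion path: lbmDONE is published first. */
static void *io_done(void *arg)
{
	struct lbuf *bp = arg;

	pthread_mutex_lock(&bp->lock);
	bp->l_flag |= lbmDONE;		/* BUG: l_flag != lbmREAD is now true */
	pthread_cond_signal(&bp->l_ioevent);	/* wake any waiter; in the kernel the
						 * waiter's own condition check also
						 * notices this */
	pthread_mutex_unlock(&bp->lock);

	/*
	 * The real lbmIODone() still clears lbmREAD, drops the cache lock and
	 * wakes the initiator after this point; if the caller has already
	 * observed the condition and freed bp, those accesses are the
	 * use-after-free that KASAN reports.
	 */
	pthread_mutex_lock(&bp->lock);
	bp->l_flag &= ~lbmREAD;
	pthread_mutex_unlock(&bp->lock);
	return NULL;
}

int main(void)
{
	struct lbuf *bp = calloc(1, sizeof(*bp));
	pthread_t t;

	pthread_mutex_init(&bp->lock, NULL);
	pthread_cond_init(&bp->l_ioevent, NULL);
	bp->l_flag = lbmREAD;		/* read I/O submitted */
	pthread_create(&t, NULL, io_done, bp);

	/* Models wait_event(bp->l_ioevent, bp->l_flag != lbmREAD). */
	pthread_mutex_lock(&bp->lock);
	while (bp->l_flag == lbmREAD)
		pthread_cond_wait(&bp->l_ioevent, &bp->lock);
	/* Can resume with lbmREAD still set, i.e. before completion is done. */
	printf("waiter resumed with l_flag=%#x\n", (unsigned)bp->l_flag);
	pthread_mutex_unlock(&bp->lock);

	/* The real caller would now tear down and kfree() the buffer while the
	 * completion side may still be using it; this demo joins first to stay
	 * safe. */
	pthread_join(t, NULL);
	free(bp);
	return 0;
}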