drop_buffers() dereferences the buffer_head pointer returned by
folio_buffers() without checking it for NULL. This leads to a NULL
pointer dereference when try_to_free_buffers() is called on a folio
that has no buffers attached.

This happens when filemap_release_folio() is called on a folio whose
mapping has AS_RELEASE_ALWAYS set but no release_folio address_space
operation defined. In that case folio_needs_release() returns true
because of the AS_RELEASE_ALWAYS flag even though the folio carries no
private buffer data, so try_to_free_buffers() ends up running on a
folio without buffers.

Add a NULL check for the buffer_head pointer and return false early
when no buffers are attached to the folio. While at it, drop a few
instances of trailing whitespace in fs/buffer.c.

Reported-by: syzbot+e07658f51ca22ab65b4e@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=e07658f51ca22ab65b4e
Fixes: 6439476311a6 ("fs: Convert drop_buffers() to use a folio")
Signed-off-by: Deepakkumar Karn
---
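A simplified sketch of the path that trips the NULL dereference,
paraphrased from mm/filemap.c for illustration rather than quoted
verbatim (not part of this patch):

	bool filemap_release_folio(struct folio *folio, gfp_t gfp)
	{
		struct address_space * const mapping = folio->mapping;

		BUG_ON(!folio_test_locked(folio));
		/* Returns true here even though folio->private is NULL,
		 * because the mapping has AS_RELEASE_ALWAYS set. */
		if (!folio_needs_release(folio))
			return true;
		if (folio_test_writeback(folio))
			return false;

		if (mapping && mapping->a_ops->release_folio)
			return mapping->a_ops->release_folio(folio, gfp);
		/* No ->release_folio, so we fall through here and
		 * drop_buffers() dereferences the NULL head returned
		 * by folio_buffers(). */
		return try_to_free_buffers(folio);
	}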
 fs/buffer.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 838c0c571022..170d15a05d2d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
  * try_to_free_buffers() will be operating against the *blockdev* mapping
  * at the time, not against the S_ISREG file which depends on those buffers.
  * So the locking for i_private_list is via the i_private_lock in the address_space
- * which backs the buffers. Which is different from the address_space 
+ * which backs the buffers. Which is different from the address_space
  * against which the buffers are listed. So for a particular address_space,
  * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
  * mapping->i_private_list will always be protected by the backing blockdev's
@@ -771,7 +771,7 @@ EXPORT_SYMBOL(block_dirty_folio);
  * Do this in two main stages: first we copy dirty buffers to a
  * temporary inode list, queueing the writes as we go. Then we clean
  * up, waiting for those writes to complete.
- * 
+ *
  * During this second stage, any subsequent updates to the file may end
  * up refiling the buffer on the original inode's dirty list again, so
  * there is a chance we will end up with a buffer queued for write but
@@ -848,7 +848,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			brelse(bh);
 			spin_lock(lock);
 		}
-	
+
 	spin_unlock(lock);
 	err2 = osync_buffers_list(lock, list);
 	if (err)
@@ -1000,7 +1000,7 @@ static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
 
 /*
  * Initialise the state of a blockdev folio's buffers.
- */ 
+ */
 static sector_t folio_init_buffers(struct folio *folio,
 		struct block_device *bdev, unsigned size)
 {
@@ -1546,7 +1546,7 @@ bool has_bh_in_lru(int cpu, void *dummy)
 {
 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
 	int i;
-	
+
 	for (i = 0; i < BH_LRU_SIZE; i++) {
 		if (b->bhs[i])
 			return true;
@@ -2166,7 +2166,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		if (folio_test_uptodate(folio)) {
 			if (!buffer_uptodate(bh))
 				set_buffer_uptodate(bh);
-			continue; 
+			continue;
 		}
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
@@ -2468,7 +2468,7 @@ EXPORT_SYMBOL(block_read_full_folio);
 
 /* utility function for filesystems that need to do work on expanding
  * truncates. Uses filesystem pagecache writes to allow the filesystem to
- * deal with the hole. 
+ * deal with the hole.
  */
 int generic_cont_expand_simple(struct inode *inode, loff_t size)
 {
@@ -2893,6 +2893,10 @@ drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
 	struct buffer_head *head = folio_buffers(folio);
 	struct buffer_head *bh;
 
+	/* The folio may have no buffers attached; bail out early */
+	if (!head)
+		return false;
+
 	bh = head;
 	do {
 		if (buffer_busy(bh))
-- 
2.52.0