======================================================
WARNING: possible circular locking dependency detected
syzkaller #0 Not tainted
------------------------------------------------------
syz.2.16954/12445 is trying to acquire lock:
ffffffff8ea85520 (fs_reclaim){+.+.}-{0:0}, at: prepare_alloc_pages+0x152/0x650

but task is already holding lock:
ffff888112f1b070 (&vma_lock->rw_sema){++++}-{4:4}, at: hugetlb_fault+0x4c2/0x1510

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #8 (&vma_lock->rw_sema){++++}-{4:4}:
       down_read+0x47/0x2e0
       __get_user_pages+0x5e4/0x2720
       populate_vma_page_range+0x2be/0x3c0
       __mm_populate+0x25f/0x390
       vm_mmap_pgoff+0x3aa/0x4f0
       ksys_mmap_pgoff+0x586/0x760
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #7 (&mm->mmap_lock){++++}-{4:4}:
       __might_fault+0xcb/0x130
       _copy_from_user+0x28/0xb0
       csum_and_copy_from_iter_full+0x1e7/0x1f00
       ip_generic_getfrag+0x149/0x2d0
       __ip6_append_data+0x39cd/0x3f60
       ip6_append_data+0x10f/0x280
       rawv6_sendmsg+0x12d3/0x18e0
       ____sys_sendmsg+0x80a/0x9f0
       ___sys_sendmsg+0x2a5/0x360
       __x64_sys_sendmsg+0x1bd/0x2a0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #6 (sk_lock-AF_INET6){+.+.}-{0:0}:
       lock_sock_nested+0x41/0x100
       inet_shutdown+0x6a/0x390
       nbd_mark_nsock_dead+0x2e9/0x560
       recv_work+0x1c2e/0x1d40
       process_scheduled_works+0xb5d/0x1860
       worker_thread+0xa53/0xfc0
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #5 (&nsock->tx_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0x37b/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_file_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #4 (&cmd->lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0xc6/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_file_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #3 (set->srcu){.+.+}-{0:0}:
       __synchronize_srcu+0xca/0x300
       elevator_switch+0x1e8/0x7a0
       elevator_change+0x2cc/0x450
       elevator_set_default+0x36c/0x430
       blk_register_queue+0x3e9/0x4e0
       __add_disk+0x677/0xd50
       add_disk_fwnode+0xfb/0x480
       nbd_dev_add+0x72c/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #2 (&q->elevator_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       elevator_change+0x1b3/0x450
       elevator_set_none+0xb5/0x140
       blk_mq_update_nr_hw_queues+0x5e7/0x1a60
       nbd_start_device+0x17f/0xb10
       nbd_genl_connect+0x165b/0x1cf0
       genl_family_rcv_msg_doit+0x22a/0x330
       genl_rcv_msg+0x61c/0x7a0
       netlink_rcv_skb+0x232/0x4b0
       genl_rcv+0x28/0x40
       netlink_unicast+0x75c/0x8e0
       netlink_sendmsg+0x813/0xb40
       ____sys_sendmsg+0x972/0x9f0
       ___sys_sendmsg+0x2a5/0x360
       __x64_sys_sendmsg+0x1bd/0x2a0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&q->q_usage_counter(io)#51){++++}-{0:0}:
       blk_alloc_queue+0x546/0x680
       __blk_mq_alloc_disk+0x197/0x390
       nbd_dev_add+0x499/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #0 (fs_reclaim){+.+.}-{0:0}:
       __lock_acquire+0x15a5/0x2cf0
       lock_acquire+0x106/0x350
       fs_reclaim_acquire+0x71/0x100
       prepare_alloc_pages+0x152/0x650
       __alloc_frozen_pages_noprof+0x12f/0x380
       alloc_pages_mpol+0x235/0x490
       alloc_pages_noprof+0xac/0x2a0
       __pud_alloc+0x3a/0x460
       huge_pte_alloc+0x4f7/0x630
       hugetlb_fault+0x51b/0x1510
       handle_mm_fault+0x2007/0x3170
       do_user_addr_fault+0xa73/0x1340
       exc_page_fault+0x6a/0xc0
       asm_exc_page_fault+0x26/0x30

other info that might help us debug this:

Chain exists of:
  fs_reclaim --> &mm->mmap_lock --> &vma_lock->rw_sema

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  rlock(&vma_lock->rw_sema);
                               lock(&mm->mmap_lock);
                               lock(&vma_lock->rw_sema);
  lock(fs_reclaim);

 *** DEADLOCK ***

3 locks held by syz.2.16954/12445:
 #0: ffff888106bed888 (vm_lock){++++}-{0:0}, at: lock_vma_under_rcu+0x1d1/0x500
 #1: ffff888103ab4858 (&hugetlb_fault_mutex_table[i]){+.+.}-{4:4}, at: hugetlb_fault+0x3d8/0x1510
 #2: ffff888112f1b070 (&vma_lock->rw_sema){++++}-{4:4}, at: hugetlb_fault+0x4c2/0x1510

stack backtrace:
CPU: 1 UID: 0 PID: 12445 Comm: syz.2.16954 Not tainted syzkaller #0 PREEMPT(full) 
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Call Trace:
 <TASK>
 dump_stack_lvl+0xe8/0x150
 print_circular_bug+0x2e1/0x300
 check_noncircular+0x12e/0x150
 __lock_acquire+0x15a5/0x2cf0
 lock_acquire+0x106/0x350
 fs_reclaim_acquire+0x71/0x100
 prepare_alloc_pages+0x152/0x650
 __alloc_frozen_pages_noprof+0x12f/0x380
 alloc_pages_mpol+0x235/0x490
 alloc_pages_noprof+0xac/0x2a0
 __pud_alloc+0x3a/0x460
 huge_pte_alloc+0x4f7/0x630
 hugetlb_fault+0x51b/0x1510
 handle_mm_fault+0x2007/0x3170
 do_user_addr_fault+0xa73/0x1340
 exc_page_fault+0x6a/0xc0
 asm_exc_page_fault+0x26/0x30
RIP: 0033:0x7fb5da4664e5
Code: 48 8b 04 24 48 85 c0 74 17 48 8b 74 24 10 48 0f ce 48 89 74 24 10 48 83 f8 01 0f 85 f0 02 00 00 48 8b 44 24 18 48 8b 54 24 10 <48> 89 10 e9 5a fe ff ff 48 8b 44 24 20 48 0b 44 24 28 75 30 48 8b
RSP: 002b:00007ffc25c3a640 EFLAGS: 00010246
RAX: 00002000000000c0 RBX: 0000000000000008 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00005555658c93d8
RBP: 00007ffc25c3a760 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 00007ffc25c3a7a0
R13: 00007fb5da815fac R14: 00000000000af13e R15: 00007fb5da815fa0
 </TASK>
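
Illustration only, not part of the report: the scenario lockdep flags is a classic lock-order inversion, and the short sketch below collapses the nine-lock chain (fs_reclaim -> ... -> &mm->mmap_lock -> &vma_lock->rw_sema) into two hypothetical userspace locks, "reclaim_lock" and "vma_lock", with the rw_semaphore simplified to a mutex. One thread mirrors the faulting task (vma lock held, then an allocation that may enter reclaim); the other mirrors the recorded #8..#0 chain (reclaim side first, vma lock last). With unlucky timing each thread blocks on the lock the other holds, which is the deadlock the splat warns about.

/*
 * Minimal sketch of the reported inversion; names are stand-ins, not kernel code.
 * Build with: cc -pthread inversion.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for fs_reclaim */
static pthread_mutex_t vma_lock     = PTHREAD_MUTEX_INITIALIZER; /* stand-in for &vma_lock->rw_sema */

/* Mirrors the faulting task: vma lock first, then an allocation that may enter reclaim. */
static void *fault_path(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&vma_lock);
    pthread_mutex_lock(&reclaim_lock);   /* ~ hugetlb_fault -> huge_pte_alloc -> fs_reclaim */
    pthread_mutex_unlock(&reclaim_lock);
    pthread_mutex_unlock(&vma_lock);
    return NULL;
}

/* Mirrors the recorded dependency chain: reclaim side first, vma lock last. */
static void *chain_path(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&reclaim_lock);
    pthread_mutex_lock(&vma_lock);       /* ~ ... -> __get_user_pages -> down_read(rw_sema) */
    pthread_mutex_unlock(&vma_lock);
    pthread_mutex_unlock(&reclaim_lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, fault_path, NULL);
    pthread_create(&b, NULL, chain_path, NULL);
    /* If the inversion hits, both joins block forever; otherwise the run completes. */
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    puts("no deadlock on this run");
    return 0;
}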
