======================================================
WARNING: possible circular locking dependency detected
syzkaller #0 Not tainted
------------------------------------------------------
kworker/u8:2/5473 is trying to acquire lock:
ffffffff8ea85520 (fs_reclaim){+.+.}-{0:0}, at: kmem_cache_alloc_node_noprof+0x4a/0x690

but task is already holding lock:
ffff8881175bb530 (&idev->mc_lock){+.+.}-{4:4}, at: ipv6_mc_netdev_event+0xe1/0x5a0

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #7 (&idev->mc_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       __ipv6_dev_mc_inc+0x4e/0xa50
       __ipv6_sock_mc_join+0x503/0x750
       do_ipv6_setsockopt+0x22e5/0x3150
       ipv6_setsockopt+0x59/0x170
       do_sock_setsockopt+0x17c/0x1b0
       __x64_sys_setsockopt+0x13d/0x1b0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f
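
Link #7 records &idev->mc_lock nesting inside the socket lock: the
setsockopt() path runs with the socket locked, and the multicast join
then takes the mutex. A minimal sketch of that ordering (illustrative
only; example_join() is a made-up name, the locking calls are the real
kernel APIs):

    static void example_join(struct sock *sk, struct inet6_dev *idev)
    {
            lock_sock(sk);                /* sk_lock-AF_INET6 */
            mutex_lock(&idev->mc_lock);   /* records sk_lock -> &idev->mc_lock */
            /* ... __ipv6_dev_mc_inc() work ... */
            mutex_unlock(&idev->mc_lock);
            release_sock(sk);
    }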

-> #6 (sk_lock-AF_INET6){+.+.}-{0:0}:
       lock_sock_nested+0x41/0x100
       inet_shutdown+0x6a/0x390
       nbd_mark_nsock_dead+0x2e9/0x560
       recv_work+0x1c2e/0x1d40
       process_scheduled_works+0xb5d/0x1860
       worker_thread+0xa53/0xfc0
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30
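
Link #6 is nbd's contribution: when the receive worker gives up on a
connection it marks the socket dead while holding nsock->tx_lock, and
the shutdown path takes the socket lock. Roughly (illustrative;
example_kill_sock() is a made-up name):

    static void example_kill_sock(struct nbd_sock *nsock)
    {
            mutex_lock(&nsock->tx_lock);
            /* inet_shutdown() does lock_sock() internally, recording
             * &nsock->tx_lock -> sk_lock-AF_INET6 */
            kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
            mutex_unlock(&nsock->tx_lock);
    }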

-> #5 (&nsock->tx_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0x37b/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_filp_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #4 (&cmd->lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0xc6/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_filp_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f
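
Links #5 and #4 both come from nbd_queue_rq(). nbd is a blocking
blk-mq driver, so ->queue_rq() runs inside the tag set's SRCU read
section, and it takes cmd->lock and then nsock->tx_lock to transmit.
Condensed (illustrative; example_queue_rq() is a made-up name):

    /* called from blk-mq dispatch, under srcu_read_lock(set->srcu) */
    static blk_status_t example_queue_rq(struct nbd_cmd *cmd,
                                         struct nbd_sock *nsock)
    {
            mutex_lock(&cmd->lock);       /* set->srcu -> &cmd->lock */
            mutex_lock(&nsock->tx_lock);  /* &cmd->lock -> &nsock->tx_lock */
            /* ... send request and payload over nsock->sock ... */
            mutex_unlock(&nsock->tx_lock);
            mutex_unlock(&cmd->lock);
            return BLK_STS_OK;
    }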

-> #3 (set->srcu){.+.+}-{0:0}:
       __synchronize_srcu+0xca/0x300
       elevator_switch+0x1e8/0x7a0
       elevator_change+0x2cc/0x450
       elevator_set_default+0x36c/0x430
       blk_register_queue+0x3e9/0x4e0
       __add_disk+0x677/0xd50
       add_disk_fwnode+0xfb/0x480
       nbd_dev_add+0x72c/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #2 (&q->elevator_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       elevator_change+0x1b3/0x450
       elevator_set_none+0xb5/0x140
       blk_mq_update_nr_hw_queues+0x5e7/0x1a60
       nbd_start_device+0x17f/0xb10
       nbd_genl_connect+0x165b/0x1cf0
       genl_family_rcv_msg_doit+0x22a/0x330
       genl_rcv_msg+0x61c/0x7a0
       netlink_rcv_skb+0x232/0x4b0
       genl_rcv+0x28/0x40
       netlink_unicast+0x75c/0x8e0
       netlink_sendmsg+0x813/0xb40
       ____sys_sendmsg+0x972/0x9f0
       ___sys_sendmsg+0x2a5/0x360
       __x64_sys_sendmsg+0x1bd/0x2a0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&q->q_usage_counter(io)#51){++++}-{0:0}:
       blk_alloc_queue+0x546/0x680
       __blk_mq_alloc_disk+0x197/0x390
       nbd_dev_add+0x499/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30
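
Links #3 through #1 tie in the block core. Per stanza #1, the
fs_reclaim -> q_usage_counter(io) edge is recorded already at
blk_alloc_queue() time, which looks like the lockdep annotation that
models "reclaim may enter and freeze the queue". Stanzas #2 and #3
then show an elevator change running with the queue frozen: it takes
q->elevator_lock and synchronizes the tag set's SRCU. Condensed
(illustrative; example_switch_elevator() is a made-up name):

    static void example_switch_elevator(struct request_queue *q,
                                        struct blk_mq_tag_set *set)
    {
            /* caller has frozen the queue: q_usage_counter(io) is held */
            mutex_lock(&q->elevator_lock);  /* q_usage_counter -> elevator_lock */
            synchronize_srcu(set->srcu);    /* elevator_lock -> set->srcu */
            mutex_unlock(&q->elevator_lock);
    }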

-> #0 (fs_reclaim){+.+.}-{0:0}:
       __lock_acquire+0x15a5/0x2cf0
       lock_acquire+0x106/0x350
       fs_reclaim_acquire+0x71/0x100
       kmem_cache_alloc_node_noprof+0x4a/0x690
       __alloc_skb+0x1d0/0x7d0
       mld_newpack+0x14c/0xc90
       add_grhead+0x5a/0x2a0
       add_grec+0x1452/0x1740
       mld_send_report+0x1ed/0x380
       ipv6_mc_netdev_event+0x1b7/0x5a0
       notifier_call_chain+0x1ad/0x3d0
       call_netdevice_notifiers+0x90/0xd0
       bond_resend_igmp_join_requests_delayed+0x61/0x170
       process_scheduled_works+0xb5d/0x1860
       worker_thread+0xa53/0xfc0
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30
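
Link #0 closes the cycle and is the acquisition that fires: the
notifier runs with idev->mc_lock held, MLD builds a report, and the
skb allocation uses a reclaim-capable GFP mask. Roughly (illustrative;
example_send_report() is a made-up name, the GFP flag is the point):

    static void example_send_report(struct inet6_dev *idev, unsigned int size)
    {
            struct sk_buff *skb;

            mutex_lock(&idev->mc_lock);
            /* GFP_KERNEL includes __GFP_DIRECT_RECLAIM, so lockdep
             * acquires fs_reclaim here: &idev->mc_lock -> fs_reclaim */
            skb = alloc_skb(size, GFP_KERNEL);
            if (skb)
                    kfree_skb(skb);
            mutex_unlock(&idev->mc_lock);
    }

Direct reclaim can issue writeback, and writeback into an nbd device
backed by an IPv6 socket eventually needs the socket lock (links #1
through #6), which is how the summary below compresses the chain to
fs_reclaim --> sk_lock-AF_INET6 --> &idev->mc_lock.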

other info that might help us debug this:

Chain exists of:
  fs_reclaim --> sk_lock-AF_INET6 --> &idev->mc_lock

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&idev->mc_lock);
                               lock(sk_lock-AF_INET6);
                               lock(&idev->mc_lock);
  lock(fs_reclaim);

 *** DEADLOCK ***
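
The two-CPU scenario above shows only the end points of the eight-lock
chain; a third actor (reclaim doing I/O into nbd) supplies the missing
fs_reclaim -> sk_lock edge. A self-contained userspace model of the
same circular wait, with pthread mutexes standing in for the kernel
locks (toy code, nothing here is kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t mc_lock    = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sk_lock    = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t fs_reclaim = PTHREAD_MUTEX_INITIALIZER;

    /* CPU0 above: the bond worker sending an MLD report */
    static void *cpu0(void *unused)
    {
            pthread_mutex_lock(&mc_lock);
            sleep(1);                        /* let the others take their first lock */
            puts("cpu0: holds mc_lock, waits for fs_reclaim");
            pthread_mutex_lock(&fs_reclaim); /* blocks forever */
            return NULL;
    }

    /* CPU1 above: setsockopt(IPV6_JOIN_GROUP) */
    static void *cpu1(void *unused)
    {
            pthread_mutex_lock(&sk_lock);
            sleep(1);
            puts("cpu1: holds sk_lock, waits for mc_lock");
            pthread_mutex_lock(&mc_lock);    /* blocks forever */
            return NULL;
    }

    /* the elided middle of the chain: reclaim ending in inet_shutdown() */
    static void *cpu2(void *unused)
    {
            pthread_mutex_lock(&fs_reclaim);
            sleep(1);
            puts("cpu2: holds fs_reclaim, waits for sk_lock");
            pthread_mutex_lock(&sk_lock);    /* blocks forever */
            return NULL;
    }

    int main(void)
    {
            pthread_t t[3];

            pthread_create(&t[0], NULL, cpu0, NULL);
            pthread_create(&t[1], NULL, cpu1, NULL);
            pthread_create(&t[2], NULL, cpu2, NULL);
            pthread_join(t[0], NULL);        /* never returns: three-way deadlock */
            return 0;
    }

Built with cc -pthread, all three threads print their message and the
process then hangs, which is the state the kworker below is stuck in.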

4 locks held by kworker/u8:2/5473:
 #0: ffff8881160e8140 ((wq_completion)bond22){+.+.}-{0:0}, at: process_scheduled_works+0xa35/0x1860
 #1: ffffc9000407fc40 ((work_completion)(&(&bond->mcast_work)->work)){+.+.}-{0:0}, at: process_scheduled_works+0xa70/0x1860
 #2: ffffffff8fdd0300 (rtnl_mutex){+.+.}-{4:4}, at: bond_resend_igmp_join_requests_delayed+0x23/0x170
 #3: ffff8881175bb530 (&idev->mc_lock){+.+.}-{4:4}, at: ipv6_mc_netdev_event+0xe1/0x5a0

stack backtrace:
CPU: 0 UID: 0 PID: 5473 Comm: kworker/u8:2 Not tainted syzkaller #0 PREEMPT(full) 
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Workqueue: bond22 bond_resend_igmp_join_requests_delayed
Call Trace:
 <TASK>
 dump_stack_lvl+0xe8/0x150
 print_circular_bug+0x2e1/0x300
 check_noncircular+0x12e/0x150
 __lock_acquire+0x15a5/0x2cf0
 lock_acquire+0x106/0x350
 fs_reclaim_acquire+0x71/0x100
 kmem_cache_alloc_node_noprof+0x4a/0x690
 __alloc_skb+0x1d0/0x7d0
 mld_newpack+0x14c/0xc90
 add_grhead+0x5a/0x2a0
 add_grec+0x1452/0x1740
 mld_send_report+0x1ed/0x380
 ipv6_mc_netdev_event+0x1b7/0x5a0
 notifier_call_chain+0x1ad/0x3d0
 call_netdevice_notifiers+0x90/0xd0
 bond_resend_igmp_join_requests_delayed+0x61/0x170
 process_scheduled_works+0xb5d/0x1860
 worker_thread+0xa53/0xfc0
 kthread+0x388/0x470
 ret_from_fork+0x514/0xb70
 ret_from_fork_asm+0x1a/0x30
 </TASK>
