======================================================
WARNING: possible circular locking dependency detected
syzkaller #0 Not tainted
------------------------------------------------------
kworker/u11:0/53 is trying to acquire lock:
ffffffff8ea85520 (fs_reclaim){+.+.}-{0:0}, at: __kmalloc_noprof+0x9d/0x760

but task is already holding lock:
ffffffff8f39f780 (dpm_list_mtx){+.+.}-{4:4}, at: device_move+0x3c/0x730

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #8 (dpm_list_mtx){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       device_pm_add+0x7d/0x350
       device_add+0x4fe/0xbb0
       device_create+0x269/0x300
       msr_device_create+0x33/0x50
       cpuhp_invoke_callback+0x445/0x860
       cpuhp_thread_fun+0x36b/0x780
       smpboot_thread_fn+0x541/0xa50
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #7 (cpuhp_state-up){+.+.}-{0:0}:
       cpuhp_thread_fun+0x127/0x780
       smpboot_thread_fn+0x541/0xa50
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #6 (cpu_hotplug_lock){++++}-{0:0}:
       cpus_read_lock+0x42/0x160
       static_key_slow_inc+0x12/0x30
       nbd_genl_reconfigure+0x132f/0x1ea0
       genl_family_rcv_msg_doit+0x22a/0x330
       genl_rcv_msg+0x61c/0x7a0
       netlink_rcv_skb+0x232/0x4b0
       genl_rcv+0x28/0x40
       netlink_unicast+0x75c/0x8e0
       netlink_sendmsg+0x813/0xb40
       ____sys_sendmsg+0x972/0x9f0
       ___sys_sendmsg+0x2a5/0x360
       __x64_sys_sendmsg+0x1bd/0x2a0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #5 (&nsock->tx_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0x37b/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_filp_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #4 (&cmd->lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       nbd_queue_rq+0xc6/0x1100
       blk_mq_dispatch_rq_list+0xa70/0x1910
       __blk_mq_sched_dispatch_requests+0xdcc/0x1600
       blk_mq_sched_dispatch_requests+0xd7/0x190
       blk_mq_run_hw_queue+0x348/0x4f0
       blk_mq_dispatch_list+0xd16/0xe10
       blk_mq_flush_plug_list+0x48d/0x570
       __blk_flush_plug+0x3ed/0x4d0
       __submit_bio+0x28d/0x580
       submit_bio_noacct_nocheck+0x2f4/0xa40
       block_read_full_folio+0x599/0x830
       filemap_read_folio+0x137/0x3b0
       do_read_cache_folio+0x358/0x590
       read_part_sector+0xb6/0x2b0
       adfspart_check_ICS+0xb1/0x960
       bdev_disk_changed+0x817/0x1770
       blkdev_get_whole+0x380/0x510
       bdev_open+0x31e/0xd30
       blkdev_open+0x470/0x610
       do_dentry_open+0x785/0x14e0
       vfs_open+0x3b/0x340
       path_openat+0x2e08/0x3860
       do_filp_open+0x23e/0x4a0
       do_sys_openat2+0x113/0x200
       __x64_sys_openat+0x138/0x170
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #3 (set->srcu){.+.+}-{0:0}:
       __synchronize_srcu+0xca/0x300
       elevator_switch+0x1e8/0x7a0
       elevator_change+0x2cc/0x450
       elevator_set_default+0x36c/0x430
       blk_register_queue+0x3e9/0x4e0
       __add_disk+0x677/0xd50
       add_disk_fwnode+0xfb/0x480
       nbd_dev_add+0x72c/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #2 (&q->elevator_lock){+.+.}-{4:4}:
       __mutex_lock+0x1a3/0x1550
       elevator_change+0x1b3/0x450
       elevator_set_none+0xb5/0x140
       blk_mq_update_nr_hw_queues+0x5e7/0x1a60
       nbd_start_device+0x17f/0xb10
       nbd_genl_connect+0x165b/0x1cf0
       genl_family_rcv_msg_doit+0x22a/0x330
       genl_rcv_msg+0x61c/0x7a0
       netlink_rcv_skb+0x232/0x4b0
       genl_rcv+0x28/0x40
       netlink_unicast+0x75c/0x8e0
       netlink_sendmsg+0x813/0xb40
       ____sys_sendmsg+0x972/0x9f0
       ___sys_sendmsg+0x2a5/0x360
       __x64_sys_sendmsg+0x1bd/0x2a0
       do_syscall_64+0x15f/0xf80
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&q->q_usage_counter(io)#49){++++}-{0:0}:
       blk_alloc_queue+0x546/0x680
       __blk_mq_alloc_disk+0x197/0x390
       nbd_dev_add+0x499/0xb50
       nbd_init+0x168/0x1f0
       do_one_initcall+0x250/0x870
       do_initcall_level+0x104/0x190
       do_initcalls+0x59/0xa0
       kernel_init_freeable+0x2a6/0x3e0
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

-> #0 (fs_reclaim){+.+.}-{0:0}:
       __lock_acquire+0x15a5/0x2cf0
       lock_acquire+0x106/0x350
       fs_reclaim_acquire+0x71/0x100
       __kmalloc_noprof+0x9d/0x760
       kobject_get_path+0xc5/0x2f0
       kobject_move+0x2f3/0x720
       device_move+0xe0/0x730
       hci_conn_del_sysfs+0xb8/0x1a0
       hci_conn_del+0xc36/0x1230
       hci_abort_conn_sync+0xdd0/0x1190
       hci_cmd_sync_work+0x213/0x400
       process_scheduled_works+0xb5d/0x1860
       worker_thread+0xa53/0xfc0
       kthread+0x388/0x470
       ret_from_fork+0x514/0xb70
       ret_from_fork_asm+0x1a/0x30

other info that might help us debug this:

Chain exists of:
  fs_reclaim --> cpuhp_state-up --> dpm_list_mtx

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(dpm_list_mtx);
                               lock(cpuhp_state-up);
                               lock(dpm_list_mtx);
  lock(fs_reclaim);

 *** DEADLOCK ***

5 locks held by kworker/u11:0/53:
 #0: ffff888106b8b940 ((wq_completion)hci2){+.+.}-{0:0}, at: process_scheduled_works+0xa35/0x1860
 #1: ffffc90000bd7c40 ((work_completion)(&hdev->cmd_sync_work)){+.+.}-{0:0}, at: process_scheduled_works+0xa70/0x1860
 #2: ffff88811971cea0 (&hdev->req_lock){+.+.}-{4:4}, at: hci_cmd_sync_work+0x1d3/0x400
 #3: ffff88811971c0b8 (&hdev->lock){+.+.}-{4:4}, at: hci_abort_conn_sync+0xa6f/0x1190
 #4: ffffffff8f39f780 (dpm_list_mtx){+.+.}-{4:4}, at: device_move+0x3c/0x730

stack backtrace:
CPU: 0 UID: 0 PID: 53 Comm: kworker/u11:0 Not tainted syzkaller #0 PREEMPT(full) 
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Workqueue: hci2 hci_cmd_sync_work
Call Trace:
 <TASK>
 dump_stack_lvl+0xe8/0x150
 print_circular_bug+0x2e1/0x300
 check_noncircular+0x12e/0x150
 __lock_acquire+0x15a5/0x2cf0
 lock_acquire+0x106/0x350
 fs_reclaim_acquire+0x71/0x100
 __kmalloc_noprof+0x9d/0x760
 kobject_get_path+0xc5/0x2f0
 kobject_move+0x2f3/0x720
 device_move+0xe0/0x730
 hci_conn_del_sysfs+0xb8/0x1a0
 hci_conn_del+0xc36/0x1230
 hci_abort_conn_sync+0xdd0/0x1190
 hci_cmd_sync_work+0x213/0x400
 process_scheduled_works+0xb5d/0x1860
 worker_thread+0xa53/0xfc0
 kthread+0x388/0x470
 ret_from_fork+0x514/0xb70
 ret_from_fork_asm+0x1a/0x30
 </TASK>
