2025/12/31 04:07:55 extracted 324514 text symbol hashes for base and 324514 for patched 2025/12/31 04:07:55 binaries are different, continuing fuzzing 2025/12/31 04:07:55 adding modified_functions to focus areas: ["__add_stripe_bio" "__bch_btree_node_write" "__bch_submit_bbio" "__bio_add_page" "__bio_advance" "__bio_alloc" "__bio_clone" "__bio_crypt_advance" "__bio_crypt_clone" "__bio_crypt_free_ctx" "__bio_integrity_endio" "__blk_crypto_bio_prep" "__blk_crypto_rq_bio_prep" "__blk_mq_alloc_requests" "__blk_mq_end_request" "__blk_rq_map_sg" "__blk_throtl_bio" "__blkdev_direct_IO" "__blkdev_issue_write_zeroes" "__blkdev_issue_zero_pages" "__blockdev_direct_IO" "__ceph_msg_data_cursor_init" "__end_swap_bio_read" "__end_swap_bio_write" "__f2fs_commit_super" "__journal_read_write" "__map_bio" "__multipath_map_bio" "__process_abnormal_io" "__process_bio_read_only" "__read_end_io" "__send_empty_flush" "__submit_bio" "__submit_discard_cmd" "__submit_zone_reset_cmd" "__swap_writepage" "__write_super" "accounted_begin" "add_ra_bio_pages" "alloc_tio" "aoe_end_buf" "aoe_end_request" "aoecmd_work" "async_copy_data" "async_io" "async_pmem_flush" "ata_rw_frameinit" "backing_request_endio" "bcache_write_super" "bch_bbio_endio" "bch_bio_alloc_pages" "bch_bio_map" "bch_btree_node_read" "bch_cache_read_endio" "bch_data_insert" "bch_data_insert_endio" "bch_data_insert_start" "bch_data_invalidate" "bch_moving_gc" "bch_write_bdev_super" "bdev_rw_virt" "bfq_actuator_index" "bfq_bic_update_cgroup" "bfq_bio_bfqg" "bfq_request_merge" "bfq_setup_cooperator" "bfqg_stats_update_legacy_io" "bio_add_folio_nofail" "bio_add_page" "bio_add_virt_nofail" "bio_alloc_bioset" "bio_alloc_cache_prune" "bio_alloc_clone" "bio_associate_blkg" "bio_associate_blkg_from_css" "bio_attempt_back_merge" "bio_attempt_discard_merge" "bio_attempt_front_merge" "bio_await_chain" "bio_blkcg_css" "bio_chain" "bio_chain_and_submit" "bio_check_pages_dirty" "bio_clone_blkg_association" "bio_cmd_bio_end_io" "bio_complete" "bio_copy_block" "bio_copy_data" "bio_copy_data_iter" "bio_copy_kern_endio_read" "bio_crypt_rq_ctx_compatible" "bio_crypt_set_ctx" "bio_dirty_fn" "bio_end_io_acct_remapped" "bio_endio" "bio_first_folio" "bio_free" "bio_free_pages" "bio_init" "bio_init_clone" "bio_integrity_add_page" "bio_integrity_advance" "bio_integrity_alloc" "bio_integrity_alloc_buf" "bio_integrity_clone" "bio_integrity_free" "bio_integrity_init" "bio_integrity_map_iter" "bio_integrity_map_user" "bio_integrity_prep" "bio_integrity_trim" "bio_integrity_unmap_user" "bio_integrity_verify_fn" "bio_iov_bvec_set" "bio_iov_iter_get_pages" "bio_iter_phys" "bio_poll" "bio_put" "bio_reset" "bio_seg_gap" "bio_set_pages_dirty" "bio_split" "bio_split_discard" "bio_split_io_at" "bio_split_rw" "bio_split_to_limits" "bio_split_write_zeroes" "bio_submit_split_bioset" "bio_trim" "bio_truncate" "bio_uninit" "bio_wait_end_io" "bio_will_gap" "bl_end_io_read" "bl_end_io_write" "bl_read_pagelist" "bl_write_pagelist" "blk_add_rq_to_plug" "blk_add_trace_bio" "blk_add_trace_bio_remap" "blk_add_trace_split" "blk_alloc_discard_bio" "blk_attempt_bio_merge" "blk_cgroup_bio_start" "blk_crypto_fallback_bio_prep" "blk_crypto_fallback_decrypt_bio" "blk_crypto_fallback_decrypt_endio" "blk_crypto_fallback_encrypt_endio" "blk_dump_rq_flags" "blk_flush_complete_seq" "blk_insert_cloned_request" "blk_integrity_complete" "blk_integrity_generate" "blk_integrity_merge_bio" "blk_integrity_merge_rq" "blk_integrity_prepare" "blk_integrity_verify_iter" "blk_map_iter_next" "blk_mq_add_hw_queues_cpuhp" 
"blk_mq_cancel_work_sync" "blk_mq_destroy_queue" "blk_mq_dispatch_list" "blk_mq_dispatch_queue_requests" "blk_mq_dispatch_rq_list" "blk_mq_dispatch_wake" "blk_mq_end_request_batch" "blk_mq_exit_queue" "blk_mq_hctx_notify_dead" "blk_mq_hctx_notify_offline" "blk_mq_hctx_notify_online" "blk_mq_init_allocated_queue" "blk_mq_insert_request" "blk_mq_map_swqueue" "blk_mq_release" "blk_mq_request_issue_directly" "blk_mq_requeue_work" "blk_mq_start_request" "blk_mq_submit_bio" "blk_mq_timeout_work" "blk_mq_try_issue_directly" "blk_mq_update_nr_requests" "blk_mq_update_tag_set_shared" "blk_next_bio" "blk_recalc_rq_segments" "blk_rq_append_bio" "blk_rq_count_integrity_sg" "blk_rq_cur_bytes" "blk_rq_dma_map_iter_start" "blk_rq_integrity_dma_map_iter_next" "blk_rq_integrity_dma_map_iter_start" "blk_rq_map_integrity_sg" "blk_rq_map_kern" "blk_rq_map_user_iov" "blk_rq_merge_ok" "blk_rq_prep_clone" "blk_rq_unmap_user" "blk_try_merge" "blk_update_request" "blk_zone_append_update_request_bio" "blk_zone_mgmt_bio_endio" "blk_zone_plug_bio" "blk_zone_wplug_bio_work" "blk_zone_wplug_prepare_bio" "blk_zone_write_plug_bio_endio" "blk_zone_write_plug_bio_merged" "blk_zone_write_plug_init_request" "blkcg_iolatency_done_bio" "blkcg_iolatency_throttle" "blkcg_punt_bio_submit" "blkcg_set_ioprio" "blkdev_bio_end_io" "blkdev_bio_end_io_async" "blkdev_direct_IO" "blkdev_issue_secure_erase" "blkdev_uring_cmd" "blkdev_zone_mgmt" "brd_submit_bio" "btree_csum_one_bio" "btree_node_read_endio" "btree_node_write_endio" "btrfs_alloc_dummy_sum" "btrfs_check_read_bio" "btrfs_csum_one_bio" "btrfs_decompress_buf2page" "btrfs_dio_end_io" "btrfs_dio_submit_io" "btrfs_encoded_read_regular_fill_pages" "btrfs_end_empty_barrier" "btrfs_end_super_write" "btrfs_lookup_bio_sums" "btrfs_raid56_end_io" "btrfs_record_physical_zoned" "btrfs_repair_io_failure" "btrfs_simple_end_io" "btrfs_submit_bbio" "btrfs_submit_bio" "btrfs_submit_compressed_read" "btrfs_submit_compressed_write" "btrfs_submit_dev_bio" "btrfs_submit_repair_write" "btrfs_use_zone_append" "btt_submit_bio" "cache_lookup" "cache_lookup_fn" "cache_map" "cached_dev_cache_miss" "cached_dev_nodata" "cached_dev_read_done" "cached_dev_read_error" "cached_dev_submit_bio" "ceph_msg_data_advance" "ceph_msg_data_next" "clone_bio" "clone_free" "clone_map" "clone_write_end_io_work" "cmp_cells" "complete_discard_bio" "copy_bio_to_actor" "corrupt_bio_common" "crypt_alloc_buffer" "crypt_convert" "crypt_endio" "crypt_free_buffer_pages" "crypt_map" "csum_one_bio" "dd_request_merge" "dec_in_flight" "detached_dev_end_io" "dio_bio_end_aio" "dio_bio_end_io" "dio_new_bio" "dio_send_cur_page" "dirty_endio" "dm_accept_partial_bio" "dm_bio_restore" "dm_crypt_integrity_io_alloc" "dm_integrity_check" "dm_integrity_check_limits" "dm_integrity_inline_recheck" "dm_integrity_map" "dm_integrity_map_continue" "dm_integrity_map_inline" "dm_io" "dm_io_acct" "dm_io_rewind" "dm_is_zone_write" "dm_poll_bio" "dm_rh_bio_to_region" "dm_rh_delay" "dm_rh_inc_pending" "dm_rh_mark_nosync" "dm_rq_bio_constructor" "dm_submit_bio" "dm_zone_endio" "dmz_chunk_work" "dmz_clone_endio" "dmz_get_mblock" "dmz_map" "dmz_mblock_bio_end_io" "dmz_rdwr_block" "dmz_submit_bio" "dmz_write_dirty_mblocks" "do_add_page_to_bio" "do_mirror" "do_mpage_readpage" "do_worker" "elv_merge" "end_bbio_compressed_read" "end_bbio_data_read" "end_bio_bh_io_sync" "end_clone_bio" "end_reshape_read" "end_reshape_write" "end_sync_read" "end_sync_write" "endio" "ext4_bio_write_folio" "ext4_end_bio" "ext4_mpage_readpages" "ext4_release_io_end" 
"f2fs_compress_write_end_io" "f2fs_finish_read_bio" "f2fs_grab_read_bio" "f2fs_merge_page_bio" "f2fs_mpage_readpages" "f2fs_post_read_work" "f2fs_read_end_io" "f2fs_read_multi_pages" "f2fs_submit_discard_endio" "f2fs_submit_page_write" "f2fs_verify_bio" "f2fs_write_end_io" "f2fs_zone_write_end_io" "flakey_map" "flash_dev_cache_miss" "flash_dev_submit_bio" "fscrypt_zeroout_range" "fsverity_verify_bio" "full_bio_end_io" "get_bio_sector_nr" "gfs2_end_log_write" "gfs2_find_jhead" "gfs2_log_get_bio" "gfs2_log_submit_bio" "gfs2_meta_read" "guard_bio_eod" "handle_failed_stripe" "handle_stripe" "handle_stripe_clean_event" "hib_end_io" "hydration_overwrite" "inc_remap_and_issue_cell" "index_rbio_pages" "integrity_end_io" "integrity_metadata" "integrity_recalc_inline" "integrity_recheck" "io_buffer_register_bvec" "io_free_rsrc_node" "io_import_reg_buf" "ioc_rqos_done_bio" "ioc_rqos_merge" "ioc_rqos_throttle" "iocg_commit_bio" "iomap_add_to_ioend" "iomap_bio_read_folio_range" "iomap_bio_read_folio_range_sync" "iomap_dio_bio_end_io" "iomap_dio_bio_iter" "iomap_dio_zero" "iomap_finish_ioend_direct" "iomap_init_ioend" "iomap_ioend_writeback_submit" "iomap_split_ioend" "issue_op" "journal_read_bucket" "journal_read_endio" "journal_write_endio" "journal_write_unlocked" "kcryptd_async_done" "kcryptd_crypt" "kcryptd_crypt_read_continue" "kcryptd_io_read" "ktio" "lbmIODone" "lbmRead" "lbmStartIO" "linear_map" "ll_back_merge_fn" "ll_merge_requests_fn" "lo_complete_rq" "lo_rw_aio" "lz4_uncompress" "lzo_uncompress" "make_discard_request" "map_bio" "md_account_bio" "md_end_clone_io" "md_end_flush" "md_flush_request" "md_free_cloned_bio" "md_handle_request" "md_submit_bio" "md_submit_discard_bio" "md_write_metadata" "metapage_read_end_io" "metapage_read_folio" "metapage_write_end_io" "metapage_write_folio" "mg_copy" "mirror_map" "mpage_end_io" "mpage_read_folio" "mpage_readahead" "mpage_writepages" "mtd_queue_rq" "multipath_map_bio" "multipath_prepare_ioctl" "nbd_send_cmd" "nilfs_end_bio_write" "nilfs_segbuf_submit_bh" "nilfs_write_logs" "ntfs_bio_fill_1" "null_handle_memory_backed" "nvme_failover_req" "nvme_ns_head_submit_bio" "nvme_prep_rq" "nvme_setup_discard" "nvme_setup_rw" "nvme_tcp_queue_rq" "nvme_tcp_recv_skb" "nvme_tcp_try_send" "nvme_unmap_metadata" "nvmet_bdev_alloc_bip" "nvmet_bdev_execute_dsm" "nvmet_bdev_execute_flush" "nvmet_bdev_execute_rw" "nvmet_bdev_execute_write_zeroes" "nvmet_bdev_execute_zone_append" "nvmet_bdev_zmgmt_send_work" "nvmet_bdev_zone_append_bio_done" "nvmet_bio_done" "o2hb_bio_end_io" "o2hb_setup_one_bio" "ops_complete_biofill" "optimisable_bio" "orig_write_end_io_work" "origin_map" "overwrite_endio" "passdown_endio" "pending_complete" "perf_trace_bcache_bio" "perf_trace_bcache_journal_write" "perf_trace_bcache_read" "perf_trace_bcache_request" "perf_trace_bcache_write" "perf_trace_blkdev_zone_mgmt" "perf_trace_block_bio" "perf_trace_block_bio_complete" "perf_trace_block_bio_remap" "perf_trace_block_split" "perf_trace_btrfs_raid56_bio" "perf_trace_f2fs__bio" "plug_cmp" "pmem_submit_bio" "ppl_flush_endio" "ppl_handle_flush_request" "ppl_log_endio" "ppl_stripe_write_finished" "ppl_write_stripe_run" "prio_endio" "prio_io" "process_bio" "process_cell" "process_deferred_bios" "process_discard_bio" "process_prepared_discard_passdown_pt1" "process_prepared_mapping" "process_queued_bios" "put_buf" "r10buf_pool_alloc" "r10buf_pool_free" "r1buf_pool_alloc" "r1buf_pool_free" "r5c_handle_cached_data_endio" "r5l_append_payload_page" "r5l_flush_stripe_to_raid" "r5l_get_meta" 
"r5l_handle_flush_request" "r5l_log_endio" "r5l_recovery_read_page" "raid0_handle_discard" "raid0_make_request" "raid10_alloc_init_r10buf" "raid10_end_discard_request" "raid10_end_read_request" "raid10_end_write_request" "raid10_handle_discard" "raid10_make_request" "raid10_read_request" "raid10_sync_request" "raid10_write_one_disk" "raid10d" "raid1_end_read_request" "raid1_end_write_request" "raid1_make_request" "raid1_read_request" "raid1_sync_request" "raid1d" "raid56_parity_alloc_scrub_rbio" "raid56_parity_recover" "raid56_scrub_wait_endio" "raid5_align_endio" "raid5_end_read_request" "raid5_end_write_request" "raid5_make_request" "raid5d" "raid_map" "raid_wait_read_end_io" "raid_wait_write_end_io" "rbio_add_bio" "rbio_add_io_paddrs" "rbio_update_error_bitmap" "read_dirty" "read_dirty_endio" "read_extent_buffer_pages_nowait" "read_moving_endio" "recv_work" "regular_request_wait" "remap" "remap_and_issue_overwrite" "remap_and_issue_shared_cell" "remap_exception" "remap_to_origin_and_cache" "req_attempt_discard_merge" "request_endio" "schedule_copy" "schedule_zero" "scrub_read_endio" "scrub_repair_read_endio" "scrub_stripe" "scrub_stripe_submit_repair_read" "scrub_submit_initial_read" "scrub_submit_write_bio" "scrub_write_endio" "scrub_write_sectors" "scsi_io_completion_action" "sd_setup_protect_cmnd" "should_fail_bio" "simple_end_io_work" "snapshot_map" "snapshot_merge_map" "squashfs_bio_read" "squashfs_read_data" "squashfs_xz_uncompress" "sr_done" "start_full_bio" "stripe_map" "stripe_map_range" "submit_bh_wbc" "submit_bio" "submit_bio_noacct" "submit_bio_wait" "submit_bio_wait_endio" "submit_extent_folio" "submit_io" "submit_one_bio" "submit_page_section" "submit_read_wait_bio_list" "submit_write_bios" "super_written" "swap_read_folio" "swap_read_page" "sync_page_io" "tg_dispatch_bps_time" "tg_dispatch_time" "thin_map" "trace_event_raw_event_bcache_bio" "trace_event_raw_event_bcache_journal_write" "trace_event_raw_event_bcache_read" "trace_event_raw_event_bcache_request" "trace_event_raw_event_bcache_write" "trace_event_raw_event_blkdev_zone_mgmt" "trace_event_raw_event_block_bio" "trace_event_raw_event_block_bio_complete" "trace_event_raw_event_block_bio_remap" "trace_event_raw_event_block_split" "trace_event_raw_event_btrfs_raid56_bio" "trace_event_raw_event_f2fs__bio" "uuid_endio" "uuid_io" "verity_end_io" "verity_finish_io" "verity_map" "verity_verify_io" "virtblk_prep_rq" "wc_add_block" "write_all_supers" "write_bdev_super_endio" "write_dirty" "write_dirty_finish" "write_moving" "write_one_eb" "write_page" "write_super_endio" "writecache_end_io" "writecache_flush_thread" "writecache_map" "writecache_map_discard" "writecache_writeback" "xfs_buf_bio_end_io" "xfs_buf_submit" "xfs_dio_zoned_submit_io" "xfs_discard_endio" "xfs_discard_extents" "xfs_end_bio" "xfs_mark_rtg_boundary" "xfs_rw_bdev" "xfs_submit_zoned_bio" "xfs_writeback_submit" "xfs_zone_alloc_and_submit" "xfs_zone_gc_prepare_reset" "xfs_zoned_gcd" "xfs_zoned_writeback_submit" "xlog_bio_end_io" "xlog_write_iclog" "z_erofs_endio" "z_erofs_runqueue" "zero_callback" "zero_exception" "zero_fill_bio_iter" "zero_map" "zlib_uncompress" "zram_submit_bio" "zstd_uncompress"] 2025/12/31 04:07:55 adding directly modified files to focus areas: ["block/bio.c" "block/blk.h" "include/linux/blk_types.h" "io_uring/rsrc.c"] 2025/12/31 04:07:55 downloading corpus #1: "https://storage.googleapis.com/syzkaller/corpus/ci-upstream-kasan-gce-root-corpus.db" 2025/12/31 04:07:56 downloading corpus #2: 
"https://storage.googleapis.com/syzkaller/corpus/ci2-upstream-fs-corpus.db" 2025/12/31 04:07:57 merging corpuses 2025/12/31 04:09:07 runner 4 connected 2025/12/31 04:09:08 runner 8 connected 2025/12/31 04:09:08 runner 7 connected 2025/12/31 04:09:09 runner 3 connected 2025/12/31 04:09:09 runner 0 connected 2025/12/31 04:09:09 runner 5 connected 2025/12/31 04:09:09 runner 6 connected 2025/12/31 04:09:14 runner 2 connected 2025/12/31 04:09:15 initializing coverage information... 2025/12/31 04:09:15 runner 1 connected 2025/12/31 04:09:15 runner 1 connected 2025/12/31 04:09:16 runner 0 connected 2025/12/31 04:09:16 runner 2 connected 2025/12/31 04:09:17 executor cover filter: 0 PCs 2025/12/31 04:09:20 discovered 7639 source files, 335930 symbols 2025/12/31 04:09:23 machine check: disabled the following syscalls: mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory 
openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or 
directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_mount_image$bcachefs : /proc/filesystems does not contain bcachefs syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled transitively disabled the following syscalls (missing resource [creating syscalls]): accept$ax25 : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] accept$netrom : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] accept$nfc_llcp : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] close$binfmt : fd_binfmt [openat$binfmt] close$fd_v4l2_buffer : fd_v4l2_buffer [ioctl$VIDIOC_QUERYBUF_DMABUF] close$ibv_device : fd_rdma [openat$uverbs0] futimesat : time_usec [getitimer getrusage getsockopt$sock_timeval ...] 
mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] mmap$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$snddsp_control : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$snddsp_status : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$usbfs : fd_usbfs [syz_open_dev$usbfs] mmap$usbmon : fd_usbmon [syz_open_dev$usbmon] mount$9p_fd : rfd9p [pipe2$9p] openat$binfmt : ptr_binfmt_file [syz_create_resource$binfmt] read$char_usb : fd_char_usb [syz_open_dev$char_usb] read$hiddev : fd_hiddev [syz_open_dev$hiddev] read$hidraw : fd_hidraw [syz_open_dev$hidraw] read$midi : fd_midi [syz_open_dev$admmidi syz_open_dev$amidi syz_open_dev$dmmidi syz_open_dev$midi syz_open_dev$sndmidi] read$msr : fd_msr [syz_open_dev$MSR] read$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] read$usbfs : fd_usbfs [syz_open_dev$usbfs] read$usbmon : fd_usbmon [syz_open_dev$usbmon] read$watch_queue : fd_watch_queue [pipe2$watch_queue] select : time_usec [getitimer getrusage getsockopt$sock_timeval ...] setsockopt$IP6T_SO_SET_REPLACE : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$IPT_SO_SET_REPLACE : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: time_usec [getitimer getrusage getsockopt$sock_timeval ...] setsockopt$WPAN_SECURITY : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_SECURITY_LEVEL : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_WANTACK : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_WANTLQI : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$X25_QBITINCL : sock_x25 [accept4$x25 syz_init_net_socket$x25] setsockopt$ax25_SO_BINDTODEVICE : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] setsockopt$ax25_int : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] setsockopt$bt_BT_CHANNEL_POLICY : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_DEFER_SETUP : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_FLUSHABLE : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_POWER : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_RCVMTU : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_SECURITY : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_SNDMTU : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_VOICE : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] 
setsockopt$bt_hci_HCI_DATA_DIR : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_hci_HCI_FILTER : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_hci_HCI_TIME_STAMP : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_l2cap_L2CAP_CONNINFO : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_l2cap_L2CAP_LM : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_l2cap_L2CAP_OPTIONS : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_rfcomm_RFCOMM_LM : sock_bt_rfcomm [syz_init_net_socket$bt_rfcomm] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] setsockopt$inet_sctp6_SCTP_ADD_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_ASSOCINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_DEACTIVATE_KEY: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_DELETE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_CONTEXT : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_PRINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_SEND_PARAM: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_SNDINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DELAYED_SACK : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_ENABLE_STREAM_RESET: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_MAXSEG : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_MAX_BURST : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PRIMARY_ADDR : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PR_SUPPORTED : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RECONFIG_SUPPORTED: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RESET_ASSOC : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RESET_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RTOINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_SET_PEER_PRIMARY_ADDR: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER_VALUE: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ADD_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ASSOCINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_DEACTIVATE_KEY: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_DELETE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_CONTEXT : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_PRINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_SEND_PARAM: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_SNDINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$inet_sctp_SCTP_DELAYED_SACK : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ENABLE_STREAM_RESET: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_MAXSEG : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_MAX_BURST : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PEER_ADDR_PARAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PEER_ADDR_THLDS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PRIMARY_ADDR : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PR_SUPPORTED : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RECONFIG_SUPPORTED: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RESET_ASSOC : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RESET_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RTOINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_SET_PEER_PRIMARY_ADDR: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_STREAM_SCHEDULER : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_STREAM_SCHEDULER_VALUE: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$llc_int : sock_llc [accept4$llc syz_init_net_socket$llc] setsockopt$netrom_NETROM_IDLE : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_N2 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T1 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T2 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T4 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$nfc_llcp_NFC_LLCP_MIUX : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] setsockopt$nfc_llcp_NFC_LLCP_RW : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] setsockopt$rose : sock_rose [accept4$rose syz_init_net_socket$rose] setsockopt$sock_attach_bpf : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$sock_timeval : time_usec [getitimer getrusage getsockopt$sock_timeval ...] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] utimensat : time_usec [getitimer getrusage getsockopt$sock_timeval ...] utimes : time_usec [getitimer getrusage getsockopt$sock_timeval ...] write$9p : wfd9p [pipe2$9p] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$P9_RATTACH : wfd9p [pipe2$9p] write$P9_RAUTH : wfd9p [pipe2$9p] write$P9_RCLUNK : wfd9p [pipe2$9p] write$P9_RCREATE : wfd9p [pipe2$9p] write$P9_RFLUSH : wfd9p [pipe2$9p] write$P9_RFSYNC : wfd9p [pipe2$9p] write$P9_RGETATTR : wfd9p [pipe2$9p] write$P9_RGETLOCK : wfd9p [pipe2$9p] write$P9_RLCREATE : wfd9p [pipe2$9p] write$P9_RLERROR : wfd9p [pipe2$9p] write$P9_RLERRORu : wfd9p [pipe2$9p] write$P9_RLINK : wfd9p [pipe2$9p] write$P9_RLOCK : wfd9p [pipe2$9p] write$P9_RLOPEN : wfd9p [pipe2$9p] write$P9_RMKDIR : wfd9p [pipe2$9p] write$P9_RMKNOD : wfd9p [pipe2$9p] 
write$P9_ROPEN : wfd9p [pipe2$9p] write$P9_RREAD : wfd9p [pipe2$9p] write$P9_RREADDIR : wfd9p [pipe2$9p] write$P9_RREADLINK : wfd9p [pipe2$9p] write$P9_RREMOVE : wfd9p [pipe2$9p] write$P9_RRENAME : wfd9p [pipe2$9p] write$P9_RRENAMEAT : wfd9p [pipe2$9p] write$P9_RSETATTR : wfd9p [pipe2$9p] write$P9_RSTAT : wfd9p [pipe2$9p] write$P9_RSTATFS : wfd9p [pipe2$9p] write$P9_RSTATu : wfd9p [pipe2$9p] write$P9_RSYMLINK : wfd9p [pipe2$9p] write$P9_RUNLINKAT : wfd9p [pipe2$9p] write$P9_RVERSION : wfd9p [pipe2$9p] write$P9_RWALK : wfd9p [pipe2$9p] write$P9_RWRITE : wfd9p [pipe2$9p] write$P9_RWSTAT : wfd9p [pipe2$9p] write$P9_RXATTRCREATE : wfd9p [pipe2$9p] write$P9_RXATTRWALK : wfd9p [pipe2$9p] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$binfmt_aout : fd_binfmt [openat$binfmt] write$binfmt_elf32 : fd_binfmt [openat$binfmt] write$binfmt_elf64 : fd_binfmt [openat$binfmt] write$binfmt_misc : fd_binfmt [openat$binfmt] write$binfmt_script : fd_binfmt [openat$binfmt] write$bt_hci : sock_bt_hci [syz_init_net_socket$bt_hci] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$char_usb : fd_char_usb [syz_open_dev$char_usb] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$evdev : fd_evdev [syz_open_dev$evdev] write$hidraw : fd_hidraw [syz_open_dev$hidraw] write$input_event : time_usec [getitimer getrusage getsockopt$sock_timeval ...] write$midi : fd_midi [syz_open_dev$admmidi syz_open_dev$amidi syz_open_dev$dmmidi syz_open_dev$midi syz_open_dev$sndmidi] write$nbd : sock_nbd_server [socketpair$nbd] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] write$tun : tcp_seq_num [syz_extract_tcp_res syz_extract_tcp_res$synack] write$usbip_server : fd_usbip_server [syz_usbip_server_init] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 905/8062 2025/12/31 04:09:23 base: machine check complete 2025/12/31 04:09:25 coverage filter: __add_stripe_bio: [__add_stripe_bio] 2025/12/31 04:09:25 coverage filter: __bch_btree_node_write: [__bch_btree_node_write] 2025/12/31 04:09:25 coverage filter: __bch_submit_bbio: [__bch_submit_bbio] 2025/12/31 04:09:25 coverage filter: __bio_add_page: [__bio_add_page] 2025/12/31 04:09:25 coverage filter: __bio_advance: [__bio_advance] 2025/12/31 04:09:25 coverage filter: __bio_alloc: [__bio_alloc] 2025/12/31 04:09:25 coverage filter: __bio_clone: [__bio_clone] 2025/12/31 04:09:25 coverage filter: __bio_crypt_advance: [__bio_crypt_advance] 2025/12/31 04:09:25 coverage filter: __bio_crypt_clone: [__bio_crypt_clone] 2025/12/31 04:09:25 coverage filter: __bio_crypt_free_ctx: [__bio_crypt_free_ctx] 2025/12/31 04:09:25 coverage filter: __bio_integrity_endio: [__bio_integrity_endio] 2025/12/31 04:09:25 coverage filter: __blk_crypto_bio_prep: [__blk_crypto_bio_prep] 2025/12/31 04:09:25 coverage filter: __blk_crypto_rq_bio_prep: [__blk_crypto_rq_bio_prep] 2025/12/31 04:09:25 coverage filter: __blk_mq_alloc_requests: [__blk_mq_alloc_requests] 2025/12/31 04:09:25 coverage filter: __blk_mq_end_request: [__blk_mq_end_request] 2025/12/31 04:09:25 coverage filter: __blk_rq_map_sg: [__blk_rq_map_sg] 2025/12/31 04:09:25 coverage filter: __blk_throtl_bio: [__blk_throtl_bio] 2025/12/31 04:09:25 coverage filter: __blkdev_direct_IO: [__blkdev_direct_IO] 2025/12/31 04:09:25 coverage filter: __blkdev_issue_write_zeroes: [__blkdev_issue_write_zeroes] 2025/12/31 04:09:25 coverage filter: __blkdev_issue_zero_pages: [__blkdev_issue_zero_pages] 2025/12/31 04:09:25 coverage filter: __blockdev_direct_IO: [__blockdev_direct_IO] 2025/12/31 04:09:25 coverage filter: __ceph_msg_data_cursor_init: [__ceph_msg_data_cursor_init] 2025/12/31 04:09:25 coverage filter: __end_swap_bio_read: [__end_swap_bio_read] 2025/12/31 04:09:25 coverage filter: __end_swap_bio_write: [__end_swap_bio_write] 2025/12/31 04:09:25 coverage filter: __f2fs_commit_super: [__f2fs_commit_super] 2025/12/31 04:09:25 coverage filter: __journal_read_write: [__journal_read_write] 2025/12/31 04:09:25 coverage filter: __map_bio: [__map_bio] 2025/12/31 04:09:25 coverage filter: 
__multipath_map_bio: [__multipath_map_bio] 2025/12/31 04:09:25 coverage filter: __process_abnormal_io: [__process_abnormal_io] 2025/12/31 04:09:25 coverage filter: __process_bio_read_only: [__process_bio_read_only] 2025/12/31 04:09:25 coverage filter: __read_end_io: [__read_end_io] 2025/12/31 04:09:25 coverage filter: __send_empty_flush: [__send_empty_flush] 2025/12/31 04:09:25 coverage filter: __submit_bio: [__submit_bio] 2025/12/31 04:09:25 coverage filter: __submit_discard_cmd: [__submit_discard_cmd] 2025/12/31 04:09:25 coverage filter: __submit_zone_reset_cmd: [__submit_zone_reset_cmd] 2025/12/31 04:09:25 coverage filter: __swap_writepage: [__swap_writepage] 2025/12/31 04:09:25 coverage filter: __write_super: [__write_super] 2025/12/31 04:09:25 coverage filter: accounted_begin: [accounted_begin] 2025/12/31 04:09:25 coverage filter: add_ra_bio_pages: [add_ra_bio_pages] 2025/12/31 04:09:25 coverage filter: alloc_tio: [alloc_tio] 2025/12/31 04:09:25 coverage filter: aoe_end_buf: [aoe_end_buf] 2025/12/31 04:09:25 coverage filter: aoe_end_request: [aoe_end_request] 2025/12/31 04:09:25 coverage filter: aoecmd_work: [aoecmd_work] 2025/12/31 04:09:25 coverage filter: async_copy_data: [async_copy_data] 2025/12/31 04:09:25 coverage filter: async_io: [__probestub_regmap_async_io_complete __traceiter_regmap_async_io_complete async_io ffs_epfile_async_io_complete nfs_async_iocounter_wait ppp_async_ioctl] 2025/12/31 04:09:25 coverage filter: async_pmem_flush: [async_pmem_flush] 2025/12/31 04:09:25 coverage filter: ata_rw_frameinit: [ata_rw_frameinit] 2025/12/31 04:09:25 coverage filter: backing_request_endio: [backing_request_endio] 2025/12/31 04:09:25 coverage filter: bcache_write_super: [bcache_write_super bcache_write_super_unlock] 2025/12/31 04:09:25 coverage filter: bch_bbio_endio: [bch_bbio_endio] 2025/12/31 04:09:25 coverage filter: bch_bio_alloc_pages: [bch_bio_alloc_pages] 2025/12/31 04:09:25 coverage filter: bch_bio_map: [bch_bio_map] 2025/12/31 04:09:25 coverage filter: bch_btree_node_read: [bch_btree_node_read bch_btree_node_read_done] 2025/12/31 04:09:25 coverage filter: bch_cache_read_endio: [bch_cache_read_endio] 2025/12/31 04:09:25 coverage filter: bch_data_insert: [bch_data_insert bch_data_insert_endio bch_data_insert_error bch_data_insert_keys bch_data_insert_start] 2025/12/31 04:09:25 coverage filter: bch_data_insert_endio: [] 2025/12/31 04:09:25 coverage filter: bch_data_insert_start: [] 2025/12/31 04:09:25 coverage filter: bch_data_invalidate: [bch_data_invalidate] 2025/12/31 04:09:25 coverage filter: bch_moving_gc: [bch_moving_gc] 2025/12/31 04:09:25 coverage filter: bch_write_bdev_super: [bch_write_bdev_super bch_write_bdev_super_unlock] 2025/12/31 04:09:25 coverage filter: bdev_rw_virt: [bdev_rw_virt] 2025/12/31 04:09:25 coverage filter: bfq_actuator_index: [bfq_actuator_index] 2025/12/31 04:09:25 coverage filter: bfq_bic_update_cgroup: [bfq_bic_update_cgroup] 2025/12/31 04:09:25 coverage filter: bfq_bio_bfqg: [bfq_bio_bfqg] 2025/12/31 04:09:25 coverage filter: bfq_request_merge: [bfq_request_merge bfq_request_merged] 2025/12/31 04:09:25 coverage filter: bfq_setup_cooperator: [bfq_setup_cooperator] 2025/12/31 04:09:25 coverage filter: bfqg_stats_update_legacy_io: [bfqg_stats_update_legacy_io] 2025/12/31 04:09:25 coverage filter: bio_add_folio_nofail: [bio_add_folio_nofail] 2025/12/31 04:09:25 coverage filter: bio_add_page: [bio_add_page dio_bio_add_page] 2025/12/31 04:09:25 coverage filter: bio_add_virt_nofail: [bio_add_virt_nofail] 2025/12/31 04:09:25 coverage filter: 
bio_alloc_bioset: [bio_alloc_bioset] 2025/12/31 04:09:25 coverage filter: bio_alloc_cache_prune: [bio_alloc_cache_prune] 2025/12/31 04:09:25 coverage filter: bio_alloc_clone: [bio_alloc_clone] 2025/12/31 04:09:25 coverage filter: bio_associate_blkg: [bio_associate_blkg bio_associate_blkg_from_css bio_associate_blkg_from_page] 2025/12/31 04:09:25 coverage filter: bio_associate_blkg_from_css: [] 2025/12/31 04:09:25 coverage filter: bio_attempt_back_merge: [bio_attempt_back_merge] 2025/12/31 04:09:25 coverage filter: bio_attempt_discard_merge: [bio_attempt_discard_merge] 2025/12/31 04:09:25 coverage filter: bio_attempt_front_merge: [bio_attempt_front_merge] 2025/12/31 04:09:25 coverage filter: bio_await_chain: [bio_await_chain] 2025/12/31 04:09:25 coverage filter: bio_blkcg_css: [bio_blkcg_css] 2025/12/31 04:09:25 coverage filter: bio_chain: [bio_chain bio_chain_and_submit] 2025/12/31 04:09:25 coverage filter: bio_chain_and_submit: [] 2025/12/31 04:09:25 coverage filter: bio_check_pages_dirty: [bio_check_pages_dirty] 2025/12/31 04:09:25 coverage filter: bio_clone_blkg_association: [bio_clone_blkg_association] 2025/12/31 04:09:25 coverage filter: bio_cmd_bio_end_io: [bio_cmd_bio_end_io] 2025/12/31 04:09:25 coverage filter: bio_complete: [__bpf_trace_block_bio_complete __probestub_block_bio_complete __traceiter_block_bio_complete bio_complete bio_complete blk_add_trace_bio_complete cached_dev_bio_complete perf_trace_block_bio_complete trace_event_raw_event_block_bio_complete trace_raw_output_block_bio_complete] 2025/12/31 04:09:25 coverage filter: bio_copy_block: [bio_copy_block] 2025/12/31 04:09:25 coverage filter: bio_copy_data: [bio_copy_data bio_copy_data_iter] 2025/12/31 04:09:25 coverage filter: bio_copy_data_iter: [] 2025/12/31 04:09:25 coverage filter: bio_copy_kern_endio_read: [bio_copy_kern_endio_read] 2025/12/31 04:09:25 coverage filter: bio_crypt_rq_ctx_compatible: [bio_crypt_rq_ctx_compatible] 2025/12/31 04:09:25 coverage filter: bio_crypt_set_ctx: [bio_crypt_set_ctx] 2025/12/31 04:09:25 coverage filter: bio_dirty_fn: [bio_dirty_fn] 2025/12/31 04:09:25 coverage filter: bio_end_io_acct_remapped: [bio_end_io_acct_remapped] 2025/12/31 04:09:25 coverage filter: bio_endio: [bio_endio blk_zone_mgmt_bio_endio blk_zone_write_plug_bio_endio dmz_bio_endio] 2025/12/31 04:09:25 coverage filter: bio_first_folio: [bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio bio_first_folio] 2025/12/31 04:09:25 coverage filter: bio_free: [bch_bbio_free bio_free bio_free_pages] 2025/12/31 04:09:25 coverage filter: bio_free_pages: [] 2025/12/31 04:09:25 coverage filter: bio_init: [bio_init bio_init_clone btrfs_bio_init] 2025/12/31 04:09:25 coverage filter: bio_init_clone: [] 2025/12/31 04:09:25 coverage filter: bio_integrity_add_page: [bio_integrity_add_page] 2025/12/31 04:09:25 coverage filter: bio_integrity_advance: [bio_integrity_advance] 2025/12/31 04:09:25 coverage filter: bio_integrity_alloc: [bio_integrity_alloc bio_integrity_alloc_buf] 2025/12/31 04:09:25 coverage filter: bio_integrity_alloc_buf: [] 2025/12/31 04:09:25 coverage filter: bio_integrity_clone: [bio_integrity_clone] 2025/12/31 04:09:25 coverage filter: bio_integrity_free: [bio_integrity_free bio_integrity_free_buf] 2025/12/31 04:09:25 coverage filter: bio_integrity_init: [bio_integrity_init] 2025/12/31 04:09:25 coverage filter: 
bio_integrity_map_iter: [bio_integrity_map_iter] 2025/12/31 04:09:25 coverage filter: bio_integrity_map_user: [bio_integrity_map_user] 2025/12/31 04:09:25 coverage filter: bio_integrity_prep: [bio_integrity_prep] 2025/12/31 04:09:25 coverage filter: bio_integrity_trim: [bio_integrity_trim] 2025/12/31 04:09:25 coverage filter: bio_integrity_unmap_user: [bio_integrity_unmap_user] 2025/12/31 04:09:25 coverage filter: bio_integrity_verify_fn: [bio_integrity_verify_fn] 2025/12/31 04:09:25 coverage filter: bio_iov_bvec_set: [bio_iov_bvec_set] 2025/12/31 04:09:25 coverage filter: bio_iov_iter_get_pages: [bio_iov_iter_get_pages] 2025/12/31 04:09:25 coverage filter: bio_iter_phys: [bio_iter_phys] 2025/12/31 04:09:25 coverage filter: bio_poll: [bio_poll] 2025/12/31 04:09:25 coverage filter: bio_put: [bio_put] 2025/12/31 04:09:25 coverage filter: bio_reset: [bio_reset] 2025/12/31 04:09:25 coverage filter: bio_seg_gap: [bio_seg_gap] 2025/12/31 04:09:25 coverage filter: bio_set_pages_dirty: [bio_set_pages_dirty] 2025/12/31 04:09:25 coverage filter: bio_split: [bio_split bio_split_discard bio_split_io_at bio_split_rw bio_split_to_limits bio_split_write_zeroes bio_split_zone_append] 2025/12/31 04:09:25 coverage filter: bio_split_discard: [] 2025/12/31 04:09:25 coverage filter: bio_split_io_at: [] 2025/12/31 04:09:25 coverage filter: bio_split_rw: [] 2025/12/31 04:09:25 coverage filter: bio_split_to_limits: [] 2025/12/31 04:09:25 coverage filter: bio_split_write_zeroes: [] 2025/12/31 04:09:25 coverage filter: bio_submit_split_bioset: [bio_submit_split_bioset] 2025/12/31 04:09:25 coverage filter: bio_trim: [bio_trim] 2025/12/31 04:09:25 coverage filter: bio_truncate: [bio_truncate] 2025/12/31 04:09:25 coverage filter: bio_uninit: [bio_uninit] 2025/12/31 04:09:25 coverage filter: bio_wait_end_io: [bio_wait_end_io] 2025/12/31 04:09:25 coverage filter: bio_will_gap: [bio_will_gap] 2025/12/31 04:09:25 coverage filter: bl_end_io_read: [bl_end_io_read] 2025/12/31 04:09:25 coverage filter: bl_end_io_write: [bl_end_io_write] 2025/12/31 04:09:25 coverage filter: bl_read_pagelist: [bl_read_pagelist] 2025/12/31 04:09:25 coverage filter: bl_write_pagelist: [bl_write_pagelist] 2025/12/31 04:09:25 coverage filter: blk_add_rq_to_plug: [blk_add_rq_to_plug] 2025/12/31 04:09:25 coverage filter: blk_add_trace_bio: [blk_add_trace_bio blk_add_trace_bio_backmerge blk_add_trace_bio_frontmerge blk_add_trace_bio_queue blk_add_trace_bio_remap] 2025/12/31 04:09:25 coverage filter: blk_add_trace_bio_remap: [] 2025/12/31 04:09:25 coverage filter: blk_add_trace_split: [blk_add_trace_split] 2025/12/31 04:09:25 coverage filter: blk_alloc_discard_bio: [blk_alloc_discard_bio] 2025/12/31 04:09:25 coverage filter: blk_attempt_bio_merge: [blk_attempt_bio_merge] 2025/12/31 04:09:25 coverage filter: blk_cgroup_bio_start: [blk_cgroup_bio_start] 2025/12/31 04:09:25 coverage filter: blk_crypto_fallback_bio_prep: [blk_crypto_fallback_bio_prep] 2025/12/31 04:09:25 coverage filter: blk_crypto_fallback_decrypt_bio: [blk_crypto_fallback_decrypt_bio] 2025/12/31 04:09:25 coverage filter: blk_crypto_fallback_decrypt_endio: [blk_crypto_fallback_decrypt_endio] 2025/12/31 04:09:25 coverage filter: blk_crypto_fallback_encrypt_endio: [blk_crypto_fallback_encrypt_endio] 2025/12/31 04:09:25 coverage filter: blk_dump_rq_flags: [blk_dump_rq_flags] 2025/12/31 04:09:25 coverage filter: blk_flush_complete_seq: [blk_flush_complete_seq] 2025/12/31 04:09:25 coverage filter: blk_insert_cloned_request: [blk_insert_cloned_request] 2025/12/31 04:09:25 coverage filter: 
blk_integrity_complete: [blk_integrity_complete] 2025/12/31 04:09:25 coverage filter: blk_integrity_generate: [blk_integrity_generate] 2025/12/31 04:09:25 coverage filter: blk_integrity_merge_bio: [blk_integrity_merge_bio] 2025/12/31 04:09:25 coverage filter: blk_integrity_merge_rq: [blk_integrity_merge_rq] 2025/12/31 04:09:25 coverage filter: blk_integrity_prepare: [blk_integrity_prepare] 2025/12/31 04:09:25 coverage filter: blk_integrity_verify_iter: [blk_integrity_verify_iter] 2025/12/31 04:09:25 coverage filter: blk_map_iter_next: [blk_map_iter_next] 2025/12/31 04:09:25 coverage filter: blk_mq_add_hw_queues_cpuhp: [blk_mq_add_hw_queues_cpuhp] 2025/12/31 04:09:25 coverage filter: blk_mq_cancel_work_sync: [blk_mq_cancel_work_sync] 2025/12/31 04:09:25 coverage filter: blk_mq_destroy_queue: [blk_mq_destroy_queue] 2025/12/31 04:09:25 coverage filter: blk_mq_dispatch_list: [blk_mq_dispatch_list] 2025/12/31 04:09:25 coverage filter: blk_mq_dispatch_queue_requests: [blk_mq_dispatch_queue_requests] 2025/12/31 04:09:25 coverage filter: blk_mq_dispatch_rq_list: [blk_mq_dispatch_rq_list] 2025/12/31 04:09:25 coverage filter: blk_mq_dispatch_wake: [blk_mq_dispatch_wake] 2025/12/31 04:09:25 coverage filter: blk_mq_end_request_batch: [blk_mq_end_request_batch] 2025/12/31 04:09:25 coverage filter: blk_mq_exit_queue: [blk_mq_exit_queue] 2025/12/31 04:09:25 coverage filter: blk_mq_hctx_notify_dead: [blk_mq_hctx_notify_dead] 2025/12/31 04:09:25 coverage filter: blk_mq_hctx_notify_offline: [blk_mq_hctx_notify_offline] 2025/12/31 04:09:25 coverage filter: blk_mq_hctx_notify_online: [blk_mq_hctx_notify_online] 2025/12/31 04:09:25 coverage filter: blk_mq_init_allocated_queue: [blk_mq_init_allocated_queue] 2025/12/31 04:09:25 coverage filter: blk_mq_insert_request: [blk_mq_insert_request] 2025/12/31 04:09:25 coverage filter: blk_mq_map_swqueue: [blk_mq_map_swqueue] 2025/12/31 04:09:25 coverage filter: blk_mq_release: [blk_mq_release] 2025/12/31 04:09:25 coverage filter: blk_mq_request_issue_directly: [blk_mq_request_issue_directly] 2025/12/31 04:09:25 coverage filter: blk_mq_requeue_work: [blk_mq_requeue_work] 2025/12/31 04:09:25 coverage filter: blk_mq_start_request: [blk_mq_start_request] 2025/12/31 04:09:25 coverage filter: blk_mq_submit_bio: [blk_mq_submit_bio] 2025/12/31 04:09:25 coverage filter: blk_mq_timeout_work: [blk_mq_timeout_work] 2025/12/31 04:09:25 coverage filter: blk_mq_try_issue_directly: [blk_mq_try_issue_directly] 2025/12/31 04:09:25 coverage filter: blk_mq_update_nr_requests: [blk_mq_update_nr_requests] 2025/12/31 04:09:25 coverage filter: blk_mq_update_tag_set_shared: [blk_mq_update_tag_set_shared] 2025/12/31 04:09:25 coverage filter: blk_next_bio: [blk_next_bio] 2025/12/31 04:09:25 coverage filter: blk_recalc_rq_segments: [blk_recalc_rq_segments] 2025/12/31 04:09:25 coverage filter: blk_rq_append_bio: [blk_rq_append_bio] 2025/12/31 04:09:25 coverage filter: blk_rq_count_integrity_sg: [blk_rq_count_integrity_sg] 2025/12/31 04:09:25 coverage filter: blk_rq_cur_bytes: [blk_rq_cur_bytes] 2025/12/31 04:09:25 coverage filter: blk_rq_dma_map_iter_start: [blk_rq_dma_map_iter_start] 2025/12/31 04:09:25 coverage filter: blk_rq_integrity_dma_map_iter_next: [blk_rq_integrity_dma_map_iter_next] 2025/12/31 04:09:25 coverage filter: blk_rq_integrity_dma_map_iter_start: [blk_rq_integrity_dma_map_iter_start] 2025/12/31 04:09:25 coverage filter: blk_rq_map_integrity_sg: [blk_rq_map_integrity_sg] 2025/12/31 04:09:25 coverage filter: blk_rq_map_kern: [blk_rq_map_kern] 2025/12/31 04:09:25 coverage filter: 
blk_rq_map_user_iov: [blk_rq_map_user_iov] 2025/12/31 04:09:25 coverage filter: blk_rq_merge_ok: [blk_rq_merge_ok] 2025/12/31 04:09:25 coverage filter: blk_rq_prep_clone: [blk_rq_prep_clone] 2025/12/31 04:09:25 coverage filter: blk_rq_unmap_user: [blk_rq_unmap_user] 2025/12/31 04:09:25 coverage filter: blk_try_merge: [blk_try_merge] 2025/12/31 04:09:25 coverage filter: blk_update_request: [blk_update_request] 2025/12/31 04:09:25 coverage filter: blk_zone_append_update_request_bio: [__probestub_blk_zone_append_update_request_bio __traceiter_blk_zone_append_update_request_bio blk_zone_append_update_request_bio] 2025/12/31 04:09:25 coverage filter: blk_zone_mgmt_bio_endio: [] 2025/12/31 04:09:25 coverage filter: blk_zone_plug_bio: [blk_zone_plug_bio] 2025/12/31 04:09:25 coverage filter: blk_zone_wplug_bio_work: [blk_zone_wplug_bio_work] 2025/12/31 04:09:25 coverage filter: blk_zone_wplug_prepare_bio: [blk_zone_wplug_prepare_bio] 2025/12/31 04:09:25 coverage filter: blk_zone_write_plug_bio_endio: [] 2025/12/31 04:09:25 coverage filter: blk_zone_write_plug_bio_merged: [blk_zone_write_plug_bio_merged] 2025/12/31 04:09:25 coverage filter: blk_zone_write_plug_init_request: [blk_zone_write_plug_init_request] 2025/12/31 04:09:25 coverage filter: blkcg_iolatency_done_bio: [blkcg_iolatency_done_bio] 2025/12/31 04:09:25 coverage filter: blkcg_iolatency_throttle: [blkcg_iolatency_throttle] 2025/12/31 04:09:25 coverage filter: blkcg_punt_bio_submit: [blkcg_punt_bio_submit] 2025/12/31 04:09:25 coverage filter: blkcg_set_ioprio: [blkcg_set_ioprio] 2025/12/31 04:09:25 coverage filter: blkdev_bio_end_io: [blkdev_bio_end_io blkdev_bio_end_io_async] 2025/12/31 04:09:25 coverage filter: blkdev_bio_end_io_async: [] 2025/12/31 04:09:25 coverage filter: blkdev_direct_IO: [blkdev_direct_IO] 2025/12/31 04:09:25 coverage filter: blkdev_issue_secure_erase: [blkdev_issue_secure_erase] 2025/12/31 04:09:25 coverage filter: blkdev_uring_cmd: [blkdev_uring_cmd] 2025/12/31 04:09:25 coverage filter: blkdev_zone_mgmt: [__bpf_trace_blkdev_zone_mgmt __probestub_blkdev_zone_mgmt __traceiter_blkdev_zone_mgmt blkdev_zone_mgmt blkdev_zone_mgmt_ioctl perf_trace_blkdev_zone_mgmt trace_blkdev_zone_mgmt trace_event_raw_event_blkdev_zone_mgmt trace_raw_output_blkdev_zone_mgmt] 2025/12/31 04:09:25 coverage filter: brd_submit_bio: [brd_submit_bio] 2025/12/31 04:09:25 coverage filter: btree_csum_one_bio: [btree_csum_one_bio] 2025/12/31 04:09:25 coverage filter: btree_node_read_endio: [btree_node_read_endio] 2025/12/31 04:09:25 coverage filter: btree_node_write_endio: [btree_node_write_endio] 2025/12/31 04:09:25 coverage filter: btrfs_alloc_dummy_sum: [btrfs_alloc_dummy_sum] 2025/12/31 04:09:25 coverage filter: btrfs_check_read_bio: [btrfs_check_read_bio] 2025/12/31 04:09:25 coverage filter: btrfs_csum_one_bio: [btrfs_csum_one_bio] 2025/12/31 04:09:25 coverage filter: btrfs_decompress_buf2page: [btrfs_decompress_buf2page] 2025/12/31 04:09:25 coverage filter: btrfs_dio_end_io: [btrfs_dio_end_io] 2025/12/31 04:09:25 coverage filter: btrfs_dio_submit_io: [btrfs_dio_submit_io] 2025/12/31 04:09:25 coverage filter: btrfs_encoded_read_regular_fill_pages: [btrfs_encoded_read_regular_fill_pages] 2025/12/31 04:09:25 coverage filter: btrfs_end_empty_barrier: [btrfs_end_empty_barrier] 2025/12/31 04:09:25 coverage filter: btrfs_end_super_write: [btrfs_end_super_write] 2025/12/31 04:09:25 coverage filter: btrfs_lookup_bio_sums: [btrfs_lookup_bio_sums] 2025/12/31 04:09:25 coverage filter: btrfs_raid56_end_io: [btrfs_raid56_end_io] 2025/12/31 04:09:25 
coverage filter: btrfs_record_physical_zoned: [btrfs_record_physical_zoned] 2025/12/31 04:09:25 coverage filter: btrfs_repair_io_failure: [btrfs_repair_io_failure] 2025/12/31 04:09:25 coverage filter: btrfs_simple_end_io: [btrfs_simple_end_io] 2025/12/31 04:09:25 coverage filter: btrfs_submit_bbio: [btrfs_submit_bbio] 2025/12/31 04:09:25 coverage filter: btrfs_submit_bio: [btrfs_submit_bio] 2025/12/31 04:09:25 coverage filter: btrfs_submit_compressed_read: [btrfs_submit_compressed_read] 2025/12/31 04:09:25 coverage filter: btrfs_submit_compressed_write: [btrfs_submit_compressed_write] 2025/12/31 04:09:25 coverage filter: btrfs_submit_dev_bio: [btrfs_submit_dev_bio] 2025/12/31 04:09:25 coverage filter: btrfs_submit_repair_write: [btrfs_submit_repair_write] 2025/12/31 04:09:25 coverage filter: btrfs_use_zone_append: [btrfs_use_zone_append] 2025/12/31 04:09:25 coverage filter: btt_submit_bio: [btt_submit_bio] 2025/12/31 04:09:25 coverage filter: cache_lookup: [btrfs_lru_cache_lookup cache_lookup cache_lookup_fn ctxt_cache_lookup_is_visible fscache_lookup_cache hci_inquiry_cache_lookup hci_inquiry_cache_lookup_resolve hci_inquiry_cache_lookup_unknown int_cache_lookup_is_visible nfsd_cache_lookup pasid_cache_lookup_is_visible regcache_lookup_reg sunrpc_cache_lookup_rcu xfs_mru_cache_lookup xfs_qm_dqget_cache_lookup] 2025/12/31 04:09:25 coverage filter: cache_lookup_fn: [] 2025/12/31 04:09:25 coverage filter: cache_map: [cache_map regcache_maple_drop regcache_maple_exit regcache_maple_init regcache_maple_insert_block regcache_maple_populate regcache_maple_read regcache_maple_sync regcache_maple_sync_block regcache_maple_write writecache_map writecache_map_discard writecache_map_flush] 2025/12/31 04:09:25 coverage filter: cached_dev_cache_miss: [cached_dev_cache_miss cached_dev_cache_miss_done] 2025/12/31 04:09:25 coverage filter: cached_dev_nodata: [cached_dev_nodata] 2025/12/31 04:09:25 coverage filter: cached_dev_read_done: [cached_dev_read_done cached_dev_read_done_bh] 2025/12/31 04:09:25 coverage filter: cached_dev_read_error: [cached_dev_read_error cached_dev_read_error_done] 2025/12/31 04:09:25 coverage filter: cached_dev_submit_bio: [cached_dev_submit_bio] 2025/12/31 04:09:25 coverage filter: ceph_msg_data_advance: [ceph_msg_data_advance] 2025/12/31 04:09:25 coverage filter: ceph_msg_data_next: [ceph_msg_data_next] 2025/12/31 04:09:25 coverage filter: clone_bio: [clone_bio end_clone_bio] 2025/12/31 04:09:25 coverage filter: clone_free: [clone_free] 2025/12/31 04:09:25 coverage filter: clone_map: [clone_map] 2025/12/31 04:09:25 coverage filter: clone_write_end_io_work: [clone_write_end_io_work] 2025/12/31 04:09:25 coverage filter: cmp_cells: [cmp_cells] 2025/12/31 04:09:25 coverage filter: complete_discard_bio: [complete_discard_bio] 2025/12/31 04:09:25 coverage filter: copy_bio_to_actor: [copy_bio_to_actor] 2025/12/31 04:09:25 coverage filter: corrupt_bio_common: [corrupt_bio_common] 2025/12/31 04:09:25 coverage filter: crypt_alloc_buffer: [crypt_alloc_buffer] 2025/12/31 04:09:25 coverage filter: crypt_convert: [crypt_convert] 2025/12/31 04:09:25 coverage filter: crypt_endio: [crypt_endio] 2025/12/31 04:09:25 coverage filter: crypt_free_buffer_pages: [crypt_free_buffer_pages] 2025/12/31 04:09:25 coverage filter: crypt_map: [crypt_map] 2025/12/31 04:09:25 coverage filter: csum_one_bio: [csum_one_bio csum_one_bio_work] 2025/12/31 04:09:25 coverage filter: dd_request_merge: [dd_request_merge dd_request_merged] 2025/12/31 04:09:25 coverage filter: dec_in_flight: [dec_in_flight] 2025/12/31 
04:09:25 coverage filter: detached_dev_end_io: [detached_dev_end_io] 2025/12/31 04:09:25 coverage filter: dio_bio_end_aio: [dio_bio_end_aio] 2025/12/31 04:09:25 coverage filter: dio_bio_end_io: [dio_bio_end_io iomap_dio_bio_end_io] 2025/12/31 04:09:25 coverage filter: dio_new_bio: [dio_new_bio] 2025/12/31 04:09:25 coverage filter: dio_send_cur_page: [dio_send_cur_page] 2025/12/31 04:09:25 coverage filter: dirty_endio: [dirty_endio read_dirty_endio] 2025/12/31 04:09:25 coverage filter: dm_accept_partial_bio: [dm_accept_partial_bio] 2025/12/31 04:09:25 coverage filter: dm_bio_restore: [dm_bio_restore] 2025/12/31 04:09:25 coverage filter: dm_crypt_integrity_io_alloc: [dm_crypt_integrity_io_alloc] 2025/12/31 04:09:25 coverage filter: dm_integrity_check: [dm_integrity_check dm_integrity_check_limits] 2025/12/31 04:09:25 coverage filter: dm_integrity_check_limits: [] 2025/12/31 04:09:25 coverage filter: dm_integrity_inline_recheck: [dm_integrity_inline_recheck] 2025/12/31 04:09:25 coverage filter: dm_integrity_map: [dm_integrity_map dm_integrity_map_continue dm_integrity_map_inline] 2025/12/31 04:09:25 coverage filter: dm_integrity_map_continue: [] 2025/12/31 04:09:25 coverage filter: dm_integrity_map_inline: [] 2025/12/31 04:09:25 coverage filter: dm_io: [__dm_io_complete dm_bufio_get_dm_io_client dm_io dm_io_acct dm_io_client_create dm_io_client_destroy dm_io_exit dm_io_rewind dm_io_set_error wdm_ioctl] 2025/12/31 04:09:25 coverage filter: dm_io_acct: [] 2025/12/31 04:09:25 coverage filter: dm_io_rewind: [] 2025/12/31 04:09:25 coverage filter: dm_is_zone_write: [dm_is_zone_write] 2025/12/31 04:09:25 coverage filter: dm_poll_bio: [dm_poll_bio] 2025/12/31 04:09:25 coverage filter: dm_rh_bio_to_region: [dm_rh_bio_to_region] 2025/12/31 04:09:25 coverage filter: dm_rh_delay: [dm_rh_delay] 2025/12/31 04:09:25 coverage filter: dm_rh_inc_pending: [dm_rh_inc_pending] 2025/12/31 04:09:25 coverage filter: dm_rh_mark_nosync: [dm_rh_mark_nosync] 2025/12/31 04:09:25 coverage filter: dm_rq_bio_constructor: [dm_rq_bio_constructor] 2025/12/31 04:09:25 coverage filter: dm_submit_bio: [dm_submit_bio dm_submit_bio_remap] 2025/12/31 04:09:25 coverage filter: dm_zone_endio: [dm_zone_endio] 2025/12/31 04:09:25 coverage filter: dmz_chunk_work: [dmz_chunk_work] 2025/12/31 04:09:25 coverage filter: dmz_clone_endio: [dmz_clone_endio] 2025/12/31 04:09:25 coverage filter: dmz_get_mblock: [dmz_get_mblock] 2025/12/31 04:09:25 coverage filter: dmz_map: [dmz_map dmz_map_zone] 2025/12/31 04:09:25 coverage filter: dmz_mblock_bio_end_io: [dmz_mblock_bio_end_io] 2025/12/31 04:09:25 coverage filter: dmz_rdwr_block: [dmz_rdwr_block] 2025/12/31 04:09:25 coverage filter: dmz_submit_bio: [dmz_submit_bio] 2025/12/31 04:09:25 coverage filter: dmz_write_dirty_mblocks: [dmz_write_dirty_mblocks] 2025/12/31 04:09:25 coverage filter: do_add_page_to_bio: [do_add_page_to_bio] 2025/12/31 04:09:25 coverage filter: do_mirror: [do_mirror mlx4_do_mirror_rule] 2025/12/31 04:09:25 coverage filter: do_mpage_readpage: [do_mpage_readpage] 2025/12/31 04:09:25 coverage filter: do_worker: [do_worker do_worker] 2025/12/31 04:09:25 coverage filter: elv_merge: [elv_merge elv_merge_requests elv_merged_request] 2025/12/31 04:09:25 coverage filter: end_bbio_compressed_read: [end_bbio_compressed_read] 2025/12/31 04:09:25 coverage filter: end_bbio_data_read: [end_bbio_data_read] 2025/12/31 04:09:25 coverage filter: end_bio_bh_io_sync: [end_bio_bh_io_sync] 2025/12/31 04:09:25 coverage filter: end_clone_bio: [] 2025/12/31 04:09:25 coverage filter: end_reshape_read: 
[end_reshape_read] 2025/12/31 04:09:25 coverage filter: end_reshape_write: [end_reshape_write] 2025/12/31 04:09:25 coverage filter: end_sync_read: [__end_sync_read end_sync_read end_sync_read] 2025/12/31 04:09:25 coverage filter: end_sync_write: [end_sync_write end_sync_write] 2025/12/31 04:09:25 coverage filter: endio: [abort_endio bio_copy_kern_endio bio_map_kern_endio btrfs_encoded_read_endio btrfs_uring_read_extent_endio btrfs_zone_finish_endio btrfs_zone_finish_endio_workfn clone_endio clone_endio clone_endio do_endio do_endio_flush endio f2fs_submit_discard_endio gfs2_meta_read_endio journal_read_endio journal_write_endio overwrite_endio overwrite_endio overwrite_endio passdown_endio ppl_flush_endio ppl_log_endio prio_endio r5c_handle_cached_data_endio r5l_log_endio r5l_log_flush_endio raid56_scrub_wait_endio raid5_align_endio read_endio read_moving_endio request_endio scrub_read_endio scrub_repair_read_endio scrub_write_endio sleep_on_endio_wait submit_bio_wait_endio thin_endio uuid_endio write_bdev_super_endio write_endio write_super_endio writecache_copy_endio writecache_endio_thread writecache_writeback_endio xfs_discard_endio xfs_discard_endio_work z_erofs_endio] 2025/12/31 04:09:25 coverage filter: ext4_bio_write_folio: [ext4_bio_write_folio] 2025/12/31 04:09:25 coverage filter: ext4_end_bio: [ext4_end_bio] 2025/12/31 04:09:25 coverage filter: ext4_mpage_readpages: [ext4_mpage_readpages] 2025/12/31 04:09:25 coverage filter: ext4_release_io_end: [ext4_release_io_end] 2025/12/31 04:09:25 coverage filter: f2fs_compress_write_end_io: [f2fs_compress_write_end_io] 2025/12/31 04:09:25 coverage filter: f2fs_finish_read_bio: [f2fs_finish_read_bio] 2025/12/31 04:09:25 coverage filter: f2fs_grab_read_bio: [f2fs_grab_read_bio] 2025/12/31 04:09:25 coverage filter: f2fs_merge_page_bio: [f2fs_merge_page_bio] 2025/12/31 04:09:25 coverage filter: f2fs_mpage_readpages: [f2fs_mpage_readpages] 2025/12/31 04:09:25 coverage filter: f2fs_post_read_work: [f2fs_post_read_work] 2025/12/31 04:09:25 coverage filter: f2fs_read_end_io: [f2fs_read_end_io] 2025/12/31 04:09:25 coverage filter: f2fs_read_multi_pages: [f2fs_read_multi_pages] 2025/12/31 04:09:25 coverage filter: f2fs_submit_discard_endio: [] 2025/12/31 04:09:25 coverage filter: f2fs_submit_page_write: [f2fs_submit_page_write] 2025/12/31 04:09:25 coverage filter: f2fs_verify_bio: [f2fs_verify_bio] 2025/12/31 04:09:25 coverage filter: f2fs_write_end_io: [f2fs_write_end_io] 2025/12/31 04:09:25 coverage filter: f2fs_zone_write_end_io: [f2fs_zone_write_end_io] 2025/12/31 04:09:25 coverage filter: flakey_map: [flakey_map] 2025/12/31 04:09:25 coverage filter: flash_dev_cache_miss: [flash_dev_cache_miss] 2025/12/31 04:09:25 coverage filter: flash_dev_submit_bio: [flash_dev_submit_bio] 2025/12/31 04:09:25 coverage filter: fscrypt_zeroout_range: [fscrypt_zeroout_range] 2025/12/31 04:09:25 coverage filter: fsverity_verify_bio: [fsverity_verify_bio] 2025/12/31 04:09:25 coverage filter: full_bio_end_io: [full_bio_end_io] 2025/12/31 04:09:25 coverage filter: get_bio_sector_nr: [get_bio_sector_nr] 2025/12/31 04:09:25 coverage filter: gfs2_end_log_write: [gfs2_end_log_write] 2025/12/31 04:09:25 coverage filter: gfs2_find_jhead: [gfs2_find_jhead] 2025/12/31 04:09:25 coverage filter: gfs2_log_get_bio: [gfs2_log_get_bio] 2025/12/31 04:09:25 coverage filter: gfs2_log_submit_bio: [gfs2_log_submit_bio] 2025/12/31 04:09:25 coverage filter: gfs2_meta_read: [gfs2_meta_read] 2025/12/31 04:09:25 coverage filter: guard_bio_eod: [guard_bio_eod] 2025/12/31 04:09:25 coverage 
filter: handle_failed_stripe: [handle_failed_stripe] 2025/12/31 04:09:25 coverage filter: handle_stripe: [handle_stripe handle_stripe_clean_event handle_stripe_dirtying handle_stripe_expansion handle_stripe_fill] 2025/12/31 04:09:25 coverage filter: handle_stripe_clean_event: [] 2025/12/31 04:09:25 coverage filter: hib_end_io: [hib_end_io] 2025/12/31 04:09:25 coverage filter: hydration_overwrite: [hydration_overwrite] 2025/12/31 04:09:25 coverage filter: inc_remap_and_issue_cell: [__inc_remap_and_issue_cell inc_remap_and_issue_cell] 2025/12/31 04:09:25 coverage filter: index_rbio_pages: [index_rbio_pages] 2025/12/31 04:09:25 coverage filter: integrity_end_io: [dm_integrity_end_io integrity_end_io] 2025/12/31 04:09:25 coverage filter: integrity_metadata: [integrity_metadata] 2025/12/31 04:09:25 coverage filter: integrity_recalc_inline: [integrity_recalc_inline] 2025/12/31 04:09:25 coverage filter: integrity_recheck: [integrity_recheck] 2025/12/31 04:09:25 coverage filter: io_buffer_register_bvec: [io_buffer_register_bvec] 2025/12/31 04:09:25 coverage filter: io_free_rsrc_node: [io_free_rsrc_node] 2025/12/31 04:09:25 coverage filter: io_import_reg_buf: [io_import_reg_buf] 2025/12/31 04:09:25 coverage filter: ioc_rqos_done_bio: [ioc_rqos_done_bio] 2025/12/31 04:09:25 coverage filter: ioc_rqos_merge: [ioc_rqos_merge] 2025/12/31 04:09:25 coverage filter: ioc_rqos_throttle: [ioc_rqos_throttle] 2025/12/31 04:09:25 coverage filter: iocg_commit_bio: [iocg_commit_bio] 2025/12/31 04:09:25 coverage filter: iomap_add_to_ioend: [__bpf_trace_iomap_add_to_ioend __probestub_iomap_add_to_ioend __traceiter_iomap_add_to_ioend iomap_add_to_ioend perf_trace_iomap_add_to_ioend trace_event_raw_event_iomap_add_to_ioend trace_raw_output_iomap_add_to_ioend] 2025/12/31 04:09:25 coverage filter: iomap_bio_read_folio_range: [iomap_bio_read_folio_range iomap_bio_read_folio_range_sync] 2025/12/31 04:09:25 coverage filter: iomap_bio_read_folio_range_sync: [] 2025/12/31 04:09:25 coverage filter: iomap_dio_bio_end_io: [] 2025/12/31 04:09:25 coverage filter: iomap_dio_bio_iter: [iomap_dio_bio_iter] 2025/12/31 04:09:25 coverage filter: iomap_dio_zero: [iomap_dio_zero] 2025/12/31 04:09:25 coverage filter: iomap_finish_ioend_direct: [iomap_finish_ioend_direct] 2025/12/31 04:09:25 coverage filter: iomap_init_ioend: [iomap_init_ioend] 2025/12/31 04:09:25 coverage filter: iomap_ioend_writeback_submit: [iomap_ioend_writeback_submit] 2025/12/31 04:09:25 coverage filter: iomap_split_ioend: [iomap_split_ioend] 2025/12/31 04:09:25 coverage filter: issue_op: [issue_op] 2025/12/31 04:09:25 coverage filter: journal_read_bucket: [journal_read_bucket] 2025/12/31 04:09:25 coverage filter: journal_read_endio: [] 2025/12/31 04:09:25 coverage filter: journal_write_endio: [] 2025/12/31 04:09:25 coverage filter: journal_write_unlocked: [journal_write_unlocked] 2025/12/31 04:09:25 coverage filter: kcryptd_async_done: [kcryptd_async_done] 2025/12/31 04:09:25 coverage filter: kcryptd_crypt: [kcryptd_crypt kcryptd_crypt_read_continue kcryptd_crypt_write_continue kcryptd_crypt_write_io_submit] 2025/12/31 04:09:25 coverage filter: kcryptd_crypt_read_continue: [] 2025/12/31 04:09:25 coverage filter: kcryptd_io_read: [kcryptd_io_read kcryptd_io_read_work] 2025/12/31 04:09:25 coverage filter: ktio: [ktio] 2025/12/31 04:09:25 coverage filter: lbmIODone: [lbmIODone] 2025/12/31 04:09:25 coverage filter: lbmRead: [lbmRead] 2025/12/31 04:09:25 coverage filter: lbmStartIO: [lbmStartIO] 2025/12/31 04:09:25 coverage filter: linear_map: [linear_map] 2025/12/31 
04:09:25 coverage filter: ll_back_merge_fn: [ll_back_merge_fn] 2025/12/31 04:09:25 coverage filter: ll_merge_requests_fn: [ll_merge_requests_fn] 2025/12/31 04:09:25 coverage filter: lo_complete_rq: [lo_complete_rq] 2025/12/31 04:09:25 coverage filter: lo_rw_aio: [lo_rw_aio lo_rw_aio_complete lo_rw_aio_do_completion] 2025/12/31 04:09:25 coverage filter: lz4_uncompress: [lz4_uncompress] 2025/12/31 04:09:25 coverage filter: lzo_uncompress: [lzo_uncompress] 2025/12/31 04:09:25 coverage filter: make_discard_request: [make_discard_request] 2025/12/31 04:09:25 coverage filter: map_bio: [iomap_bio_submit_read map_bio multipath_map_bio pci_map_biosrom pci_unmap_biosrom] 2025/12/31 04:09:25 coverage filter: md_account_bio: [md_account_bio] 2025/12/31 04:09:25 coverage filter: md_end_clone_io: [md_end_clone_io] 2025/12/31 04:09:25 coverage filter: md_end_flush: [md_end_flush] 2025/12/31 04:09:25 coverage filter: md_flush_request: [md_flush_request] 2025/12/31 04:09:25 coverage filter: md_free_cloned_bio: [md_free_cloned_bio] 2025/12/31 04:09:25 coverage filter: md_handle_request: [md_handle_request] 2025/12/31 04:09:25 coverage filter: md_submit_bio: [md_submit_bio] 2025/12/31 04:09:25 coverage filter: md_submit_discard_bio: [md_submit_discard_bio] 2025/12/31 04:09:25 coverage filter: md_write_metadata: [md_write_metadata] 2025/12/31 04:09:25 coverage filter: metapage_read_end_io: [metapage_read_end_io] 2025/12/31 04:09:25 coverage filter: metapage_read_folio: [metapage_read_folio] 2025/12/31 04:09:25 coverage filter: metapage_write_end_io: [metapage_write_end_io] 2025/12/31 04:09:25 coverage filter: metapage_write_folio: [metapage_write_folio] 2025/12/31 04:09:25 coverage filter: mg_copy: [mg_copy] 2025/12/31 04:09:25 coverage filter: mirror_map: [mirror_map] 2025/12/31 04:09:25 coverage filter: mpage_end_io: [mpage_end_io] 2025/12/31 04:09:25 coverage filter: mpage_read_folio: [mpage_read_folio] 2025/12/31 04:09:25 coverage filter: mpage_readahead: [mpage_readahead] 2025/12/31 04:09:25 coverage filter: mpage_writepages: [mpage_writepages] 2025/12/31 04:09:25 coverage filter: mtd_queue_rq: [mtd_queue_rq] 2025/12/31 04:09:25 coverage filter: multipath_map_bio: [] 2025/12/31 04:09:25 coverage filter: multipath_prepare_ioctl: [multipath_prepare_ioctl] 2025/12/31 04:09:25 coverage filter: nbd_send_cmd: [nbd_send_cmd] 2025/12/31 04:09:25 coverage filter: nilfs_end_bio_write: [nilfs_end_bio_write] 2025/12/31 04:09:25 coverage filter: nilfs_segbuf_submit_bh: [nilfs_segbuf_submit_bh] 2025/12/31 04:09:25 coverage filter: nilfs_write_logs: [nilfs_write_logs] 2025/12/31 04:09:25 coverage filter: ntfs_bio_fill_1: [ntfs_bio_fill_1] 2025/12/31 04:09:25 coverage filter: null_handle_memory_backed: [null_handle_memory_backed] 2025/12/31 04:09:25 coverage filter: nvme_failover_req: [nvme_failover_req] 2025/12/31 04:09:25 coverage filter: nvme_ns_head_submit_bio: [nvme_ns_head_submit_bio] 2025/12/31 04:09:25 coverage filter: nvme_prep_rq: [nvme_prep_rq] 2025/12/31 04:09:25 coverage filter: nvme_setup_discard: [nvme_setup_discard] 2025/12/31 04:09:25 coverage filter: nvme_setup_rw: [nvme_setup_rw] 2025/12/31 04:09:25 coverage filter: nvme_tcp_queue_rq: [nvme_tcp_queue_rq] 2025/12/31 04:09:25 coverage filter: nvme_tcp_recv_skb: [nvme_tcp_recv_skb] 2025/12/31 04:09:25 coverage filter: nvme_tcp_try_send: [nvme_tcp_try_send] 2025/12/31 04:09:25 coverage filter: nvme_unmap_metadata: [nvme_unmap_metadata] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_alloc_bip: [nvmet_bdev_alloc_bip] 2025/12/31 04:09:25 coverage filter: 
nvmet_bdev_execute_dsm: [nvmet_bdev_execute_dsm] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_execute_flush: [nvmet_bdev_execute_flush] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_execute_rw: [nvmet_bdev_execute_rw] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_execute_write_zeroes: [nvmet_bdev_execute_write_zeroes] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_execute_zone_append: [nvmet_bdev_execute_zone_append] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_zmgmt_send_work: [nvmet_bdev_zmgmt_send_work] 2025/12/31 04:09:25 coverage filter: nvmet_bdev_zone_append_bio_done: [nvmet_bdev_zone_append_bio_done] 2025/12/31 04:09:25 coverage filter: nvmet_bio_done: [nvmet_bio_done] 2025/12/31 04:09:25 coverage filter: o2hb_bio_end_io: [o2hb_bio_end_io] 2025/12/31 04:09:25 coverage filter: o2hb_setup_one_bio: [o2hb_setup_one_bio] 2025/12/31 04:09:25 coverage filter: ops_complete_biofill: [ops_complete_biofill] 2025/12/31 04:09:25 coverage filter: optimisable_bio: [optimisable_bio] 2025/12/31 04:09:25 coverage filter: orig_write_end_io_work: [orig_write_end_io_work] 2025/12/31 04:09:25 coverage filter: origin_map: [origin_map] 2025/12/31 04:09:25 coverage filter: overwrite_endio: [] 2025/12/31 04:09:25 coverage filter: passdown_endio: [] 2025/12/31 04:09:25 coverage filter: pending_complete: [pending_complete] 2025/12/31 04:09:25 coverage filter: perf_trace_bcache_bio: [perf_trace_bcache_bio] 2025/12/31 04:09:25 coverage filter: perf_trace_bcache_journal_write: [perf_trace_bcache_journal_write] 2025/12/31 04:09:25 coverage filter: perf_trace_bcache_read: [perf_trace_bcache_read] 2025/12/31 04:09:25 coverage filter: perf_trace_bcache_request: [perf_trace_bcache_request] 2025/12/31 04:09:25 coverage filter: perf_trace_bcache_write: [perf_trace_bcache_write] 2025/12/31 04:09:25 coverage filter: perf_trace_blkdev_zone_mgmt: [] 2025/12/31 04:09:25 coverage filter: perf_trace_block_bio: [perf_trace_block_bio perf_trace_block_bio_remap] 2025/12/31 04:09:25 coverage filter: perf_trace_block_bio_complete: [] 2025/12/31 04:09:25 coverage filter: perf_trace_block_bio_remap: [] 2025/12/31 04:09:25 coverage filter: perf_trace_block_split: [perf_trace_block_split] 2025/12/31 04:09:25 coverage filter: perf_trace_btrfs_raid56_bio: [perf_trace_btrfs_raid56_bio] 2025/12/31 04:09:25 coverage filter: perf_trace_f2fs__bio: [perf_trace_f2fs__bio] 2025/12/31 04:09:25 coverage filter: plug_cmp: [plug_cmp] 2025/12/31 04:09:25 coverage filter: pmem_submit_bio: [pmem_submit_bio] 2025/12/31 04:09:25 coverage filter: ppl_flush_endio: [] 2025/12/31 04:09:25 coverage filter: ppl_handle_flush_request: [ppl_handle_flush_request] 2025/12/31 04:09:25 coverage filter: ppl_log_endio: [] 2025/12/31 04:09:25 coverage filter: ppl_stripe_write_finished: [ppl_stripe_write_finished] 2025/12/31 04:09:25 coverage filter: ppl_write_stripe_run: [ppl_write_stripe_run] 2025/12/31 04:09:25 coverage filter: prio_endio: [] 2025/12/31 04:09:25 coverage filter: prio_io: [prio_io] 2025/12/31 04:09:25 coverage filter: process_bio: [process_bio process_bio_fail process_bio_read_only process_bio_success] 2025/12/31 04:09:25 coverage filter: process_cell: [process_cell process_cell_fail process_cell_read_only process_cell_success] 2025/12/31 04:09:25 coverage filter: process_deferred_bios: [process_deferred_bios] 2025/12/31 04:09:25 coverage filter: process_discard_bio: [process_discard_bio] 2025/12/31 04:09:25 coverage filter: process_prepared_discard_passdown_pt1: [process_prepared_discard_passdown_pt1] 2025/12/31 04:09:25 coverage filter: 
process_prepared_mapping: [process_prepared_mapping process_prepared_mapping_fail] 2025/12/31 04:09:25 coverage filter: process_queued_bios: [process_queued_bios] 2025/12/31 04:09:25 coverage filter: put_buf: [aa_put_buffer bpf_put_buffers put_buf] 2025/12/31 04:09:25 coverage filter: r10buf_pool_alloc: [r10buf_pool_alloc] 2025/12/31 04:09:25 coverage filter: r10buf_pool_free: [r10buf_pool_free] 2025/12/31 04:09:25 coverage filter: r1buf_pool_alloc: [r1buf_pool_alloc] 2025/12/31 04:09:25 coverage filter: r1buf_pool_free: [r1buf_pool_free] 2025/12/31 04:09:25 coverage filter: r5c_handle_cached_data_endio: [] 2025/12/31 04:09:25 coverage filter: r5l_append_payload_page: [r5l_append_payload_page] 2025/12/31 04:09:25 coverage filter: r5l_flush_stripe_to_raid: [r5l_flush_stripe_to_raid] 2025/12/31 04:09:25 coverage filter: r5l_get_meta: [r5l_get_meta] 2025/12/31 04:09:25 coverage filter: r5l_handle_flush_request: [r5l_handle_flush_request] 2025/12/31 04:09:25 coverage filter: r5l_log_endio: [] 2025/12/31 04:09:25 coverage filter: r5l_recovery_read_page: [r5l_recovery_read_page] 2025/12/31 04:09:25 coverage filter: raid0_handle_discard: [raid0_handle_discard] 2025/12/31 04:09:25 coverage filter: raid0_make_request: [raid0_make_request] 2025/12/31 04:09:25 coverage filter: raid10_alloc_init_r10buf: [raid10_alloc_init_r10buf] 2025/12/31 04:09:25 coverage filter: raid10_end_discard_request: [raid10_end_discard_request] 2025/12/31 04:09:25 coverage filter: raid10_end_read_request: [raid10_end_read_request] 2025/12/31 04:09:25 coverage filter: raid10_end_write_request: [raid10_end_write_request] 2025/12/31 04:09:25 coverage filter: raid10_handle_discard: [raid10_handle_discard] 2025/12/31 04:09:25 coverage filter: raid10_make_request: [raid10_make_request] 2025/12/31 04:09:25 coverage filter: raid10_read_request: [raid10_read_request] 2025/12/31 04:09:25 coverage filter: raid10_sync_request: [raid10_sync_request] 2025/12/31 04:09:25 coverage filter: raid10_write_one_disk: [raid10_write_one_disk] 2025/12/31 04:09:25 coverage filter: raid10d: [raid10d] 2025/12/31 04:09:25 coverage filter: raid1_end_read_request: [raid1_end_read_request] 2025/12/31 04:09:25 coverage filter: raid1_end_write_request: [raid1_end_write_request] 2025/12/31 04:09:25 coverage filter: raid1_make_request: [raid1_make_request] 2025/12/31 04:09:25 coverage filter: raid1_read_request: [raid1_read_request] 2025/12/31 04:09:25 coverage filter: raid1_sync_request: [raid1_sync_request] 2025/12/31 04:09:25 coverage filter: raid1d: [raid1d] 2025/12/31 04:09:25 coverage filter: raid56_parity_alloc_scrub_rbio: [raid56_parity_alloc_scrub_rbio] 2025/12/31 04:09:25 coverage filter: raid56_parity_recover: [raid56_parity_recover] 2025/12/31 04:09:25 coverage filter: raid56_scrub_wait_endio: [] 2025/12/31 04:09:25 coverage filter: raid5_align_endio: [] 2025/12/31 04:09:25 coverage filter: raid5_end_read_request: [raid5_end_read_request] 2025/12/31 04:09:25 coverage filter: raid5_end_write_request: [raid5_end_write_request] 2025/12/31 04:09:25 coverage filter: raid5_make_request: [raid5_make_request] 2025/12/31 04:09:25 coverage filter: raid5d: [raid5d] 2025/12/31 04:09:25 coverage filter: raid_map: [raid_map] 2025/12/31 04:09:25 coverage filter: raid_wait_read_end_io: [raid_wait_read_end_io] 2025/12/31 04:09:25 coverage filter: raid_wait_write_end_io: [raid_wait_write_end_io] 2025/12/31 04:09:25 coverage filter: rbio_add_bio: [rbio_add_bio] 2025/12/31 04:09:25 coverage filter: rbio_add_io_paddrs: [rbio_add_io_paddrs] 2025/12/31 04:09:25 coverage 
filter: rbio_update_error_bitmap: [rbio_update_error_bitmap] 2025/12/31 04:09:25 coverage filter: read_dirty: [read_dirty read_dirty_submit] 2025/12/31 04:09:25 coverage filter: read_dirty_endio: [] 2025/12/31 04:09:25 coverage filter: read_extent_buffer_pages_nowait: [read_extent_buffer_pages_nowait] 2025/12/31 04:09:25 coverage filter: read_moving_endio: [] 2025/12/31 04:09:25 coverage filter: recv_work: [fcloop_fcp_abort_recv_work fcloop_fcp_recv_work nvmet_bdev_zone_zmgmt_recv_work rds_recv_worker recv_work tipc_conn_recv_work] 2025/12/31 04:09:25 coverage filter: regular_request_wait: [regular_request_wait] 2025/12/31 04:09:25 coverage filter: remap: [__bpf_trace_block_bio_remap __bpf_trace_block_rq_remap __bpf_trace_xfs_reflink_remap_blocks __devm_ioremap_resource __generic_remap_file_range_prep __ia32_sys_mremap __ia32_sys_remap_file_pages __ioremap_caller __ioremap_collect_map_flags __probestub_block_bio_remap __probestub_block_rq_remap __probestub_xfs_reflink_cow_remap_from __probestub_xfs_reflink_cow_remap_skip __probestub_xfs_reflink_cow_remap_to __probestub_xfs_reflink_remap_blocks __probestub_xfs_reflink_remap_blocks_error __probestub_xfs_reflink_remap_extent_dest __probestub_xfs_reflink_remap_extent_error __probestub_xfs_reflink_remap_extent_src __probestub_xfs_reflink_remap_range __probestub_xfs_reflink_remap_range_error __probestub_xfs_swap_extent_rmap_remap __probestub_xfs_swap_extent_rmap_remap_piece __remap_and_issue_shared_cell __se_sys_mremap __se_sys_remap_file_pages __traceiter_block_bio_remap __traceiter_block_rq_remap __traceiter_xfs_reflink_cow_remap_from __traceiter_xfs_reflink_cow_remap_skip __traceiter_xfs_reflink_cow_remap_to __traceiter_xfs_reflink_remap_blocks __traceiter_xfs_reflink_remap_blocks_error __traceiter_xfs_reflink_remap_extent_dest __traceiter_xfs_reflink_remap_extent_error __traceiter_xfs_reflink_remap_extent_src __traceiter_xfs_reflink_remap_range __traceiter_xfs_reflink_remap_range_error __traceiter_xfs_swap_extent_rmap_remap __traceiter_xfs_swap_extent_rmap_remap_piece __x64_sys_mremap __x64_sys_remap_file_pages aio_ring_mremap arch_memremap_wb bitmap_bitremap bitmap_remap blk_add_trace_rq_remap blk_log_remap btrfs_remap_file_range calc_plane_remap_info chipio_remap_stream cifs_remap cifs_remap_file_range dax_remap_file_range_prep devm_ioremap devm_ioremap_match devm_ioremap_release devm_ioremap_resource devm_ioremap_resource_wc devm_ioremap_uc devm_ioremap_wc devm_memremap devm_memremap_match devm_memremap_pages devm_memremap_pages_release devm_memremap_release devm_nvdimm_memremap devm_pci_remap_cfg_resource devm_pci_remap_cfgspace devm_pci_remap_iospace devm_platform_get_and_ioremap_resource devm_platform_ioremap_resource devm_platform_ioremap_resource_byname disable_irq_remapping dma_common_contiguous_remap dma_common_free_remap dma_common_pages_remap dmar_walk_remapping_entries do_remap_pfn_range fbcon_remap_all generic_remap_check_len generic_remap_file_range_prep intel_fb_needs_pot_stride_remap intel_irq_remap_add_device intel_irq_remapping_activate intel_irq_remapping_alloc intel_irq_remapping_deactivate intel_irq_remapping_free intel_irq_remapping_select intel_plane_can_remap intel_remap_pages intel_remapped_info_size intel_setup_irq_remapping iommu_disable_irq_remapping iommu_enable_irq_remapping iommu_set_irq_remapping ioremap ioremap_cache ioremap_change_attr ioremap_encrypted ioremap_page_range ioremap_prot ioremap_uc ioremap_wc ioremap_wt ip_mc_remap ipv6_mc_remap irq_remapping_cap irq_remapping_disable irq_remapping_reenable 
irq_remapping_restore_boot_irq_mode mddev_trace_remap mddev_trace_remap mddev_trace_remap memremap memremap_compat_align memremap_pages mremap_at mremap_to mremap_userfaultfd_complete mremap_userfaultfd_fail mremap_userfaultfd_prep nfs42_remap_file_range ocfs2_reflink_remap_blocks ocfs2_remap_file_range ovl_remap_file_range panic_if_irq_remap pci_ioremap_bar pci_ioremap_wc_bar pci_remap_iospace perf_trace_block_rq_remap perf_trace_xfs_reflink_remap_blocks reenable_irq_remapping remap remap_and_issue_overwrite remap_and_issue_shared_cell remap_contiguous_pages remap_exception remap_io_mapping remap_io_sg remap_oldmem_pfn_range remap_pfn remap_pfn_range remap_pfn_range_complete remap_pfn_range_prepare remap_sg remap_to_origin remap_to_origin_and_cache remap_to_origin_and_issue remap_to_origin_clear_discard remap_verify_area remap_vmalloc_range remap_vmalloc_range_partial remapped_nvme_show rnbd_clt_remap_dev_show rnbd_clt_remap_dev_store rnbd_clt_remap_device set_irq_remapping_broken special_mapping_mremap trace_event_raw_event_block_bio_remap trace_event_raw_event_block_rq_remap trace_event_raw_event_xfs_reflink_remap_blocks trace_raw_output_block_bio_remap trace_raw_output_block_rq_remap trace_raw_output_xfs_reflink_remap_blocks trace_xfs_reflink_cow_remap_from trace_xfs_reflink_cow_remap_to trace_xfs_reflink_remap_range_error trace_xfs_swap_extent_rmap_remap_piece tramp_mremap vdso_mremap virtqueue_add_inbuf_premapped virtqueue_add_outbuf_premapped vmemmap_remap_pte vmemmap_remap_range xfs_bmapi_remap xfs_file_remap_range xfs_iunlock2_remapping xfs_reflink_remap_blocks xfs_reflink_remap_extent xfs_reflink_remap_prep xol_mremap] 2025/12/31 04:09:25 coverage filter: remap_and_issue_overwrite: [] 2025/12/31 04:09:25 coverage filter: remap_and_issue_shared_cell: [] 2025/12/31 04:09:25 coverage filter: remap_exception: [] 2025/12/31 04:09:25 coverage filter: remap_to_origin_and_cache: [] 2025/12/31 04:09:25 coverage filter: req_attempt_discard_merge: [req_attempt_discard_merge] 2025/12/31 04:09:25 coverage filter: request_endio: [] 2025/12/31 04:09:25 coverage filter: schedule_copy: [schedule_copy] 2025/12/31 04:09:25 coverage filter: schedule_zero: [schedule_zero] 2025/12/31 04:09:25 coverage filter: scrub_read_endio: [] 2025/12/31 04:09:25 coverage filter: scrub_repair_read_endio: [] 2025/12/31 04:09:25 coverage filter: scrub_stripe: [flush_scrub_stripes init_scrub_stripe release_scrub_stripe scrub_stripe scrub_stripe_get_kaddr scrub_stripe_read_repair_worker scrub_stripe_submit_repair_read wait_scrub_stripe_io] 2025/12/31 04:09:25 coverage filter: scrub_stripe_submit_repair_read: [] 2025/12/31 04:09:25 coverage filter: scrub_submit_initial_read: [scrub_submit_initial_read] 2025/12/31 04:09:25 coverage filter: scrub_submit_write_bio: [scrub_submit_write_bio] 2025/12/31 04:09:25 coverage filter: scrub_write_endio: [] 2025/12/31 04:09:25 coverage filter: scrub_write_sectors: [scrub_write_sectors] 2025/12/31 04:09:25 coverage filter: scsi_io_completion_action: [scsi_io_completion_action] 2025/12/31 04:09:25 coverage filter: sd_setup_protect_cmnd: [sd_setup_protect_cmnd] 2025/12/31 04:09:25 coverage filter: should_fail_bio: [should_fail_bio] 2025/12/31 04:09:25 coverage filter: simple_end_io_work: [simple_end_io_work] 2025/12/31 04:09:25 coverage filter: snapshot_map: [snapshot_map] 2025/12/31 04:09:25 coverage filter: snapshot_merge_map: [snapshot_merge_map] 2025/12/31 04:09:25 coverage filter: squashfs_bio_read: [squashfs_bio_read] 2025/12/31 04:09:25 coverage filter: squashfs_read_data: 
[squashfs_read_data] 2025/12/31 04:09:25 coverage filter: squashfs_xz_uncompress: [squashfs_xz_uncompress] 2025/12/31 04:09:25 coverage filter: sr_done: [sr_done] 2025/12/31 04:09:25 coverage filter: start_full_bio: [start_full_bio] 2025/12/31 04:09:25 coverage filter: stripe_map: [stripe_map stripe_map_range stripe_map_sector] 2025/12/31 04:09:25 coverage filter: stripe_map_range: [] 2025/12/31 04:09:25 coverage filter: submit_bh_wbc: [submit_bh_wbc] 2025/12/31 04:09:25 coverage filter: submit_bio: [hfsplus_submit_bio iomap_dio_submit_bio submit_bio submit_bio_noacct submit_bio_noacct_nocheck submit_bio_wait zram_submit_bio] 2025/12/31 04:09:25 coverage filter: submit_bio_noacct: [] 2025/12/31 04:09:25 coverage filter: submit_bio_wait: [] 2025/12/31 04:09:25 coverage filter: submit_bio_wait_endio: [] 2025/12/31 04:09:25 coverage filter: submit_extent_folio: [submit_extent_folio] 2025/12/31 04:09:25 coverage filter: submit_io: [f2fs_dio_write_submit_io r5l_do_submit_io r5l_submit_io_async submit_io xfs_dio_zoned_submit_io] 2025/12/31 04:09:25 coverage filter: submit_one_bio: [submit_one_bio] 2025/12/31 04:09:25 coverage filter: submit_page_section: [submit_page_section] 2025/12/31 04:09:25 coverage filter: submit_read_wait_bio_list: [submit_read_wait_bio_list] 2025/12/31 04:09:25 coverage filter: submit_write_bios: [submit_write_bios] 2025/12/31 04:09:25 coverage filter: super_written: [super_written] 2025/12/31 04:09:25 coverage filter: swap_read_folio: [swap_read_folio] 2025/12/31 04:09:25 coverage filter: swap_read_page: [swap_read_page] 2025/12/31 04:09:25 coverage filter: sync_page_io: [r10_sync_page_io r1_sync_page_io sync_page_io] 2025/12/31 04:09:25 coverage filter: tg_dispatch_bps_time: [tg_dispatch_bps_time] 2025/12/31 04:09:25 coverage filter: tg_dispatch_time: [tg_dispatch_time] 2025/12/31 04:09:25 coverage filter: thin_map: [thin_map] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_bcache_bio: [trace_event_raw_event_bcache_bio] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_bcache_journal_write: [trace_event_raw_event_bcache_journal_write] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_bcache_read: [trace_event_raw_event_bcache_read] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_bcache_request: [trace_event_raw_event_bcache_request] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_bcache_write: [trace_event_raw_event_bcache_write] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_blkdev_zone_mgmt: [] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_block_bio: [trace_event_raw_event_block_bio] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_block_bio_complete: [] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_block_bio_remap: [] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_block_split: [trace_event_raw_event_block_split] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_btrfs_raid56_bio: [trace_event_raw_event_btrfs_raid56_bio] 2025/12/31 04:09:25 coverage filter: trace_event_raw_event_f2fs__bio: [trace_event_raw_event_f2fs__bio] 2025/12/31 04:09:25 coverage filter: uuid_endio: [] 2025/12/31 04:09:25 coverage filter: uuid_io: [uuid_io uuid_io_unlock] 2025/12/31 04:09:25 coverage filter: verity_end_io: [verity_end_io] 2025/12/31 04:09:25 coverage filter: verity_finish_io: [verity_finish_io] 2025/12/31 04:09:25 coverage filter: verity_map: [verity_map] 2025/12/31 04:09:25 coverage filter: verity_verify_io: [verity_verify_io] 2025/12/31 04:09:25 coverage 
filter: virtblk_prep_rq: [virtblk_prep_rq] 2025/12/31 04:09:25 coverage filter: wc_add_block: [wc_add_block] 2025/12/31 04:09:25 coverage filter: write_all_supers: [write_all_supers] 2025/12/31 04:09:25 coverage filter: write_bdev_super_endio: [] 2025/12/31 04:09:25 coverage filter: write_dirty: [__write_dirty_buffer __write_dirty_buffers_async btrfs_write_dirty_block_groups dm_bufio_write_dirty_buffers dm_bufio_write_dirty_buffers_async write_dirty write_dirty_buffer write_dirty_finish] 2025/12/31 04:09:25 coverage filter: write_dirty_finish: [] 2025/12/31 04:09:25 coverage filter: write_moving: [write_moving write_moving_finish] 2025/12/31 04:09:25 coverage filter: write_one_eb: [write_one_eb] 2025/12/31 04:09:25 coverage filter: write_page: [__bpf_trace_ext4_da_write_pages_extent __probestub_ext4_da_write_pages_extent __probestub_pnfs_mds_fallback_write_pagelist __traceiter_ext4_da_write_pages_extent __traceiter_pnfs_mds_fallback_write_pagelist do_write_page ff_layout_write_pagelist filelayout_write_pagelist hci_cc_write_page_scan_activity hci_cc_write_page_scan_type lan88xx_write_page perf_trace_ext4_da_write_pages_extent phy_write_paged rtl821x_write_page swap_write_page trace_event_raw_event_ext4_da_write_pages_extent trace_raw_output_ext4_da_write_pages_extent write_page xdr_write_pages] 2025/12/31 04:09:25 coverage filter: write_super_endio: [] 2025/12/31 04:09:25 coverage filter: writecache_end_io: [writecache_end_io] 2025/12/31 04:09:25 coverage filter: writecache_flush_thread: [writecache_flush_thread] 2025/12/31 04:09:25 coverage filter: writecache_map: [] 2025/12/31 04:09:25 coverage filter: writecache_map_discard: [] 2025/12/31 04:09:25 coverage filter: writecache_writeback: [writecache_writeback] 2025/12/31 04:09:25 coverage filter: xfs_buf_bio_end_io: [xfs_buf_bio_end_io] 2025/12/31 04:09:25 coverage filter: xfs_buf_submit: [__probestub_xfs_buf_submit __traceiter_xfs_buf_submit xfs_buf_submit] 2025/12/31 04:09:25 coverage filter: xfs_dio_zoned_submit_io: [] 2025/12/31 04:09:25 coverage filter: xfs_discard_endio: [] 2025/12/31 04:09:25 coverage filter: xfs_discard_extents: [xfs_discard_extents] 2025/12/31 04:09:25 coverage filter: xfs_end_bio: [xfs_end_bio] 2025/12/31 04:09:25 coverage filter: xfs_mark_rtg_boundary: [xfs_mark_rtg_boundary] 2025/12/31 04:09:25 coverage filter: xfs_rw_bdev: [xfs_rw_bdev] 2025/12/31 04:09:25 coverage filter: xfs_submit_zoned_bio: [xfs_submit_zoned_bio] 2025/12/31 04:09:25 coverage filter: xfs_writeback_submit: [xfs_writeback_submit] 2025/12/31 04:09:25 coverage filter: xfs_zone_alloc_and_submit: [xfs_zone_alloc_and_submit] 2025/12/31 04:09:25 coverage filter: xfs_zone_gc_prepare_reset: [xfs_zone_gc_prepare_reset] 2025/12/31 04:09:25 coverage filter: xfs_zoned_gcd: [xfs_zoned_gcd] 2025/12/31 04:09:25 coverage filter: xfs_zoned_writeback_submit: [xfs_zoned_writeback_submit] 2025/12/31 04:09:25 coverage filter: xlog_bio_end_io: [xlog_bio_end_io] 2025/12/31 04:09:25 coverage filter: xlog_write_iclog: [xlog_write_iclog] 2025/12/31 04:09:25 coverage filter: z_erofs_endio: [] 2025/12/31 04:09:25 coverage filter: z_erofs_runqueue: [z_erofs_runqueue] 2025/12/31 04:09:25 coverage filter: zero_callback: [zero_callback] 2025/12/31 04:09:25 coverage filter: zero_exception: [zero_exception] 2025/12/31 04:09:25 coverage filter: zero_fill_bio_iter: [zero_fill_bio_iter] 2025/12/31 04:09:25 coverage filter: zero_map: [zero_map] 2025/12/31 04:09:25 coverage filter: zlib_uncompress: [zlib_uncompress] 2025/12/31 04:09:25 coverage filter: zram_submit_bio: [] 
2025/12/31 04:09:25 coverage filter: zstd_uncompress: [zstd_uncompress] 2025/12/31 04:09:25 coverage filter: block/bio.c: [block/bio.c] 2025/12/31 04:09:25 coverage filter: block/blk.h: [] 2025/12/31 04:09:25 coverage filter: include/linux/blk_types.h: [] 2025/12/31 04:09:25 coverage filter: io_uring/rsrc.c: [io_uring/rsrc.c] 2025/12/31 04:09:25 area "symbols": 31012 PCs in the cover filter 2025/12/31 04:09:25 area "files": 1986 PCs in the cover filter 2025/12/31 04:09:25 area "": 0 PCs in the cover filter 2025/12/31 04:09:25 executor cover filter: 0 PCs 2025/12/31 04:09:27 machine check: disabled the following syscalls: mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open 
/selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or 
directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_mount_image$bcachefs : /proc/filesystems does not contain bcachefs syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled transitively disabled the following syscalls (missing resource [creating syscalls]): accept$ax25 : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] accept$netrom : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] accept$nfc_llcp : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] close$binfmt : fd_binfmt [openat$binfmt] close$fd_v4l2_buffer : fd_v4l2_buffer [ioctl$VIDIOC_QUERYBUF_DMABUF] close$ibv_device : fd_rdma [openat$uverbs0] futimesat : time_usec [getitimer getrusage getsockopt$sock_timeval ...] 
mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] mmap$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$snddsp_control : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$snddsp_status : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] mmap$usbfs : fd_usbfs [syz_open_dev$usbfs] mmap$usbmon : fd_usbmon [syz_open_dev$usbmon] mount$9p_fd : rfd9p [pipe2$9p] openat$binfmt : ptr_binfmt_file [syz_create_resource$binfmt] read$char_usb : fd_char_usb [syz_open_dev$char_usb] read$hiddev : fd_hiddev [syz_open_dev$hiddev] read$hidraw : fd_hidraw [syz_open_dev$hidraw] read$midi : fd_midi [syz_open_dev$admmidi syz_open_dev$amidi syz_open_dev$dmmidi syz_open_dev$midi syz_open_dev$sndmidi] read$msr : fd_msr [syz_open_dev$MSR] read$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] read$usbfs : fd_usbfs [syz_open_dev$usbfs] read$usbmon : fd_usbmon [syz_open_dev$usbmon] read$watch_queue : fd_watch_queue [pipe2$watch_queue] select : time_usec [getitimer getrusage getsockopt$sock_timeval ...] setsockopt$IP6T_SO_SET_REPLACE : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$IPT_SO_SET_REPLACE : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: time_usec [getitimer getrusage getsockopt$sock_timeval ...] setsockopt$WPAN_SECURITY : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_SECURITY_LEVEL : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_WANTACK : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$WPAN_WANTLQI : sock_802154_dgram [syz_init_net_socket$802154_dgram] setsockopt$X25_QBITINCL : sock_x25 [accept4$x25 syz_init_net_socket$x25] setsockopt$ax25_SO_BINDTODEVICE : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] setsockopt$ax25_int : sock_ax25 [accept$ax25 accept4$ax25 syz_init_net_socket$ax25] setsockopt$bt_BT_CHANNEL_POLICY : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_DEFER_SETUP : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_FLUSHABLE : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_POWER : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_RCVMTU : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_SECURITY : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_SNDMTU : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] setsockopt$bt_BT_VOICE : sock_bt [accept4$bt_l2cap syz_init_net_socket$bt_bnep syz_init_net_socket$bt_cmtp ...] 
setsockopt$bt_hci_HCI_DATA_DIR : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_hci_HCI_FILTER : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_hci_HCI_TIME_STAMP : sock_bt_hci [syz_init_net_socket$bt_hci] setsockopt$bt_l2cap_L2CAP_CONNINFO : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_l2cap_L2CAP_LM : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_l2cap_L2CAP_OPTIONS : sock_bt_l2cap [accept4$bt_l2cap syz_init_net_socket$bt_l2cap] setsockopt$bt_rfcomm_RFCOMM_LM : sock_bt_rfcomm [syz_init_net_socket$bt_rfcomm] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] setsockopt$inet_sctp6_SCTP_ADD_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_ASSOCINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_DEACTIVATE_KEY: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_DELETE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_AUTH_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_CONTEXT : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_PRINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_SEND_PARAM: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DEFAULT_SNDINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_DELAYED_SACK : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_ENABLE_STREAM_RESET: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_MAXSEG : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_MAX_BURST : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PRIMARY_ADDR : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_PR_SUPPORTED : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RECONFIG_SUPPORTED: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RESET_ASSOC : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RESET_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_RTOINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_SET_PEER_PRIMARY_ADDR: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER_VALUE: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ADD_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ASSOCINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_DEACTIVATE_KEY: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_DELETE_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_AUTH_KEY : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_CONTEXT : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_PRINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_SEND_PARAM: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_DEFAULT_SNDINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$inet_sctp_SCTP_DELAYED_SACK : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_ENABLE_STREAM_RESET: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_MAXSEG : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_MAX_BURST : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PEER_ADDR_PARAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PEER_ADDR_THLDS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PRIMARY_ADDR : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_PR_SUPPORTED : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RECONFIG_SUPPORTED: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RESET_ASSOC : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RESET_STREAMS : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_RTOINFO : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_SET_PEER_PRIMARY_ADDR: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_STREAM_SCHEDULER : assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] setsockopt$inet_sctp_SCTP_STREAM_SCHEDULER_VALUE: assoc_id [getsockopt$inet_sctp6_SCTP_ASSOCINFO getsockopt$inet_sctp6_SCTP_AUTH_ACTIVE_KEY getsockopt$inet_sctp6_SCTP_CONTEXT ...] 
setsockopt$llc_int : sock_llc [accept4$llc syz_init_net_socket$llc] setsockopt$netrom_NETROM_IDLE : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_N2 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T1 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T2 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$netrom_NETROM_T4 : sock_netrom [accept$netrom accept4$netrom syz_init_net_socket$netrom] setsockopt$nfc_llcp_NFC_LLCP_MIUX : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] setsockopt$nfc_llcp_NFC_LLCP_RW : sock_nfc_llcp [accept$nfc_llcp accept4$nfc_llcp syz_init_net_socket$nfc_llcp] setsockopt$rose : sock_rose [accept4$rose syz_init_net_socket$rose] setsockopt$sock_attach_bpf : fd_bpf_prog [bpf$BPF_PROG_GET_FD_BY_ID bpf$BPF_PROG_RAW_TRACEPOINT_LOAD bpf$BPF_PROG_WITH_BTFID_LOAD ...] setsockopt$sock_timeval : time_usec [getitimer getrusage getsockopt$sock_timeval ...] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] utimensat : time_usec [getitimer getrusage getsockopt$sock_timeval ...] utimes : time_usec [getitimer getrusage getsockopt$sock_timeval ...] write$9p : wfd9p [pipe2$9p] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$P9_RATTACH : wfd9p [pipe2$9p] write$P9_RAUTH : wfd9p [pipe2$9p] write$P9_RCLUNK : wfd9p [pipe2$9p] write$P9_RCREATE : wfd9p [pipe2$9p] write$P9_RFLUSH : wfd9p [pipe2$9p] write$P9_RFSYNC : wfd9p [pipe2$9p] write$P9_RGETATTR : wfd9p [pipe2$9p] write$P9_RGETLOCK : wfd9p [pipe2$9p] write$P9_RLCREATE : wfd9p [pipe2$9p] write$P9_RLERROR : wfd9p [pipe2$9p] write$P9_RLERRORu : wfd9p [pipe2$9p] write$P9_RLINK : wfd9p [pipe2$9p] write$P9_RLOCK : wfd9p [pipe2$9p] write$P9_RLOPEN : wfd9p [pipe2$9p] write$P9_RMKDIR : wfd9p [pipe2$9p] write$P9_RMKNOD : wfd9p [pipe2$9p] 
write$P9_ROPEN : wfd9p [pipe2$9p] write$P9_RREAD : wfd9p [pipe2$9p] write$P9_RREADDIR : wfd9p [pipe2$9p] write$P9_RREADLINK : wfd9p [pipe2$9p] write$P9_RREMOVE : wfd9p [pipe2$9p] write$P9_RRENAME : wfd9p [pipe2$9p] write$P9_RRENAMEAT : wfd9p [pipe2$9p] write$P9_RSETATTR : wfd9p [pipe2$9p] write$P9_RSTAT : wfd9p [pipe2$9p] write$P9_RSTATFS : wfd9p [pipe2$9p] write$P9_RSTATu : wfd9p [pipe2$9p] write$P9_RSYMLINK : wfd9p [pipe2$9p] write$P9_RUNLINKAT : wfd9p [pipe2$9p] write$P9_RVERSION : wfd9p [pipe2$9p] write$P9_RWALK : wfd9p [pipe2$9p] write$P9_RWRITE : wfd9p [pipe2$9p] write$P9_RWSTAT : wfd9p [pipe2$9p] write$P9_RXATTRCREATE : wfd9p [pipe2$9p] write$P9_RXATTRWALK : wfd9p [pipe2$9p] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$binfmt_aout : fd_binfmt [openat$binfmt] write$binfmt_elf32 : fd_binfmt [openat$binfmt] write$binfmt_elf64 : fd_binfmt [openat$binfmt] write$binfmt_misc : fd_binfmt [openat$binfmt] write$binfmt_script : fd_binfmt [openat$binfmt] write$bt_hci : sock_bt_hci [syz_init_net_socket$bt_hci] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$char_usb : fd_char_usb [syz_open_dev$char_usb] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$evdev : fd_evdev [syz_open_dev$evdev] write$hidraw : fd_hidraw [syz_open_dev$hidraw] write$input_event : time_usec [getitimer getrusage getsockopt$sock_timeval ...] write$midi : fd_midi [syz_open_dev$admmidi syz_open_dev$amidi syz_open_dev$dmmidi syz_open_dev$midi syz_open_dev$sndmidi] write$nbd : sock_nbd_server [socketpair$nbd] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$snddsp : fd_snd_dsp [syz_open_dev$sndpcmc syz_open_dev$sndpcmp] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] write$tun : tcp_seq_num [syz_extract_tcp_res syz_extract_tcp_res$synack] write$usbip_server : fd_usbip_server [syz_usbip_server_init] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 905/8062 2025/12/31 04:09:27 new: machine check complete 2025/12/31 04:09:27 new: adding 13577 seeds 2025/12/31 04:09:43 crash "kernel BUG in hfs_write_inode" is already known 2025/12/31 04:09:43 base crash "kernel BUG in hfs_write_inode" is to be ignored 2025/12/31 04:09:43 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:09:44 crash "kernel BUG in hfs_write_inode" is already known 2025/12/31 04:09:44 base crash "kernel BUG in hfs_write_inode" is to be ignored 2025/12/31 04:09:44 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:09:45 crash "kernel BUG in hfs_write_inode" is already known 2025/12/31 04:09:45 base crash "kernel BUG in hfs_write_inode" is to be ignored 2025/12/31 04:09:45 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:09:46 crash "kernel BUG in hfs_write_inode" is already known 2025/12/31 04:09:46 base crash "kernel BUG in hfs_write_inode" is to be ignored 2025/12/31 04:09:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:09:47 crash "kernel BUG in hfs_write_inode" is already known 2025/12/31 04:09:47 base crash "kernel BUG in hfs_write_inode" is to be ignored 2025/12/31 04:09:47 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:09:50 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:09:56 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:10:40 runner 6 connected 2025/12/31 04:10:40 runner 2 connected 2025/12/31 04:10:41 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:10:42 runner 8 connected 2025/12/31 04:10:42 runner 4 connected 2025/12/31 04:10:44 runner 7 connected 2025/12/31 04:10:48 runner 0 connected 2025/12/31 04:10:54 runner 1 connected 2025/12/31 04:11:07 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:11:08 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:11:09 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:11:09 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:11:22 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:11:37 runner 2 
connected 2025/12/31 04:11:52 crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is already known 2025/12/31 04:11:52 base crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is to be ignored 2025/12/31 04:11:52 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = false] 2025/12/31 04:12:05 runner 5 connected 2025/12/31 04:12:05 runner 4 connected 2025/12/31 04:12:06 runner 3 connected 2025/12/31 04:12:08 runner 7 connected 2025/12/31 04:12:20 runner 1 connected 2025/12/31 04:12:22 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:12:26 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:12:42 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:12:42 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:12:42 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:12:49 runner 6 connected 2025/12/31 04:12:54 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:12:54 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:12:54 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:13:11 STAT { "buffer too small": 0, "candidate triage jobs": 47, "candidates": 11794, "comps overflows": 0, "corpus": 1671, "corpus [files]": 1079, "corpus [symbols]": 1110, "cover overflows": 335, "coverage": 70103, "distributor delayed": 3379, "distributor undelayed": 3378, "distributor violated": 64, "exec candidate": 1783, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 2542, "exec total [new]": 7940, "exec triage": 5419, "executor restarts [base]": 76, "executor restarts [new]": 183, "fault jobs": 0, "fuzzer jobs": 47, "fuzzing VMs [base]": 0, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 71281, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 1783, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 325, "reproducing": 0, "rpc recv": 1218342048, "rpc sent": 181340392, "signal": 69383, "smash jobs": 0, "triage jobs": 0, "vm output": 4558430, "vm restarts [base]": 6, "vm restarts [new]": 20 } 2025/12/31 04:13:13 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:13:19 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:13:19 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:13:19 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:13:20 runner 0 connected 2025/12/31 04:13:24 runner 2 connected 2025/12/31 04:13:31 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:13:31 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:13:31 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:13:40 runner 0 connected 2025/12/31 04:13:40 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:13:41 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:13:42 patched 
crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:13:43 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:13:51 runner 1 connected 2025/12/31 04:13:54 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:14:10 runner 1 connected 2025/12/31 04:14:17 runner 4 connected 2025/12/31 04:14:19 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:14:28 runner 3 connected 2025/12/31 04:14:38 runner 5 connected 2025/12/31 04:14:38 runner 2 connected 2025/12/31 04:14:39 runner 6 connected 2025/12/31 04:14:41 runner 7 connected 2025/12/31 04:14:47 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:14:51 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:14:53 runner 8 connected 2025/12/31 04:14:56 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:15:03 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:15:06 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:15:16 runner 2 connected 2025/12/31 04:15:18 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:15:19 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:15:37 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:15:45 runner 0 connected 2025/12/31 04:15:47 runner 1 connected 2025/12/31 04:15:55 runner 0 connected 2025/12/31 04:16:01 runner 1 connected 2025/12/31 04:16:05 runner 4 connected 2025/12/31 04:16:15 runner 2 connected 2025/12/31 04:16:16 runner 8 connected 2025/12/31 04:16:21 crash "kernel BUG in txLock" is already known 2025/12/31 04:16:21 base crash "kernel BUG in txLock" is to be ignored 2025/12/31 04:16:21 patched crashed: kernel BUG in txLock [need repro = false] 2025/12/31 04:16:35 runner 2 connected 2025/12/31 04:16:41 crash "kernel BUG in txUnlock" is already known 2025/12/31 04:16:41 base crash "kernel BUG in txUnlock" is to be ignored 2025/12/31 04:16:41 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:16:42 crash "kernel BUG in txUnlock" is already known 2025/12/31 04:16:42 base crash "kernel BUG in txUnlock" is to be ignored 2025/12/31 04:16:42 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:16:55 crash "kernel BUG in txUnlock" is already known 2025/12/31 04:16:55 base crash "kernel BUG in txUnlock" is to be ignored 2025/12/31 04:16:55 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:16:56 crash "kernel BUG in txUnlock" is already known 2025/12/31 04:16:56 base crash "kernel BUG in txUnlock" is to be ignored 2025/12/31 04:16:56 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:16:58 crash "kernel BUG in txUnlock" is already known 2025/12/31 04:16:58 base crash "kernel BUG in txUnlock" is to be ignored 2025/12/31 04:16:58 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:17:19 runner 3 connected 2025/12/31 04:17:36 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:17:37 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:17:39 runner 0 connected 2025/12/31 04:17:40 runner 5 connected 2025/12/31 04:17:46 base crash: kernel BUG in txUnlock 2025/12/31 04:17:49 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:17:52 runner 2 connected 2025/12/31 04:17:54 runner 8 connected 2025/12/31 04:17:54 runner 1 connected 2025/12/31 04:18:05 patched crashed: kernel BUG 
in jfs_evict_inode [need repro = false] 2025/12/31 04:18:07 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:18:08 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:18:11 STAT { "buffer too small": 0, "candidate triage jobs": 34, "candidates": 9954, "comps overflows": 0, "corpus": 3493, "corpus [files]": 1853, "corpus [symbols]": 1931, "cover overflows": 693, "coverage": 82820, "distributor delayed": 6913, "distributor undelayed": 6905, "distributor violated": 68, "exec candidate": 3623, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 5837, "exec total [new]": 16075, "exec triage": 11019, "executor restarts [base]": 118, "executor restarts [new]": 287, "fault jobs": 0, "fuzzer jobs": 34, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 84018, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 3622, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 514, "reproducing": 0, "rpc recv": 2528611124, "rpc sent": 380460568, "signal": 82057, "smash jobs": 0, "triage jobs": 0, "vm output": 9011951, "vm restarts [base]": 13, "vm restarts [new]": 40 } 2025/12/31 04:18:18 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:18:34 runner 6 connected 2025/12/31 04:18:34 runner 7 connected 2025/12/31 04:18:44 runner 2 connected 2025/12/31 04:18:45 runner 4 connected 2025/12/31 04:19:03 runner 3 connected 2025/12/31 04:19:06 runner 0 connected 2025/12/31 04:19:07 runner 0 connected 2025/12/31 04:19:07 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:19:15 runner 5 connected 2025/12/31 04:19:35 crash "possible deadlock in ocfs2_evict_inode" is already known 2025/12/31 04:19:35 base crash "possible deadlock in ocfs2_evict_inode" is to be ignored 2025/12/31 04:19:35 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 04:19:46 crash "possible deadlock in ocfs2_evict_inode" is already known 2025/12/31 04:19:46 base crash "possible deadlock in ocfs2_evict_inode" is to be ignored 2025/12/31 04:19:46 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 04:20:04 runner 1 connected 2025/12/31 04:20:35 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:20:39 runner 6 connected 2025/12/31 04:20:51 runner 7 connected 2025/12/31 04:20:57 base crash: kernel BUG in txUnlock 2025/12/31 04:21:15 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:21:15 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:21:15 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:21:26 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:21:26 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:21:26 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:21:39 runner 1 connected 2025/12/31 04:21:46 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:21:55 runner 0 connected 2025/12/31 04:22:13 patched crashed: kernel BUG in jfs_evict_inode [need repro = 
false] 2025/12/31 04:22:18 runner 7 connected 2025/12/31 04:22:21 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:22:21 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:22:21 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:22:23 runner 4 connected 2025/12/31 04:22:27 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:22:30 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:22:34 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:22:34 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:22:34 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:22:36 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:22:43 runner 1 connected 2025/12/31 04:23:10 runner 3 connected 2025/12/31 04:23:11 STAT { "buffer too small": 0, "candidate triage jobs": 22, "candidates": 7663, "comps overflows": 0, "corpus": 5712, "corpus [files]": 2688, "corpus [symbols]": 2806, "cover overflows": 1143, "coverage": 94204, "distributor delayed": 10003, "distributor undelayed": 9998, "distributor violated": 102, "exec candidate": 5914, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 9372, "exec total [new]": 26944, "exec triage": 17898, "executor restarts [base]": 162, "executor restarts [new]": 439, "fault jobs": 0, "fuzzer jobs": 22, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 0, "max signal": 95195, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 5913, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 401, "reproducing": 0, "rpc recv": 3553437856, "rpc sent": 623646112, "signal": 92889, "smash jobs": 0, "triage jobs": 0, "vm output": 16235350, "vm restarts [base]": 18, "vm restarts [new]": 52 } 2025/12/31 04:23:18 runner 1 connected 2025/12/31 04:23:25 runner 0 connected 2025/12/31 04:23:26 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:23:27 runner 0 connected 2025/12/31 04:23:31 runner 8 connected 2025/12/31 04:23:33 runner 5 connected 2025/12/31 04:23:33 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:23:34 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:23:34 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:23:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:23:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:24:12 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:24:23 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:24:24 runner 2 connected 2025/12/31 04:24:31 runner 7 connected 2025/12/31 04:24:31 runner 3 connected 2025/12/31 04:24:32 runner 2 connected 2025/12/31 04:24:42 runner 4 connected 2025/12/31 04:24:43 runner 1 connected 2025/12/31 04:25:11 runner 0 connected 2025/12/31 04:25:19 crash "possible deadlock in hfs_extend_file" is already known 2025/12/31 04:25:19 base crash "possible deadlock in hfs_extend_file" is to be ignored 
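Note on the pattern above: almost every "patched crashed: ..." entry is immediately preceded by "crash ... is already known" and "base crash ... is to be ignored", and ends with "[need repro = false]" — crashes whose titles are already known to occur on the base kernel are not scheduled for reproduction on the patched kernel. The following is a minimal, hypothetical Go sketch of that decision only (not syzkaller's actual code); the type and function names are assumptions made for illustration.

```go
// crashfilter.go
//
// Hedged sketch: decide whether a crash seen on the patched kernel needs a
// reproduction attempt, by checking whether the same crash title is already
// known on the base kernel. All identifiers here are hypothetical.
package main

import "fmt"

// knownBaseCrashes models crash titles already observed (and ignored) on base.
type knownBaseCrashes map[string]bool

// needRepro is true only for titles that are new relative to the base kernel.
func (k knownBaseCrashes) needRepro(title string) bool {
	return !k[title]
}

func main() {
	base := knownBaseCrashes{
		"kernel BUG in hfs_write_inode":                 true,
		"KASAN: slab-out-of-bounds Read in dtSplitPage": true,
	}
	for _, title := range []string{
		"kernel BUG in hfs_write_inode",  // known on base -> [need repro = false]
		"INFO: task hung in path_openat", // not known on base -> [need repro = true]
	} {
		fmt.Printf("patched crashed: %s [need repro = %v]\n", title, base.needRepro(title))
	}
}
```

Under this reading, only titles absent from the base set (such as the later "INFO: task hung in path_openat") get a scheduled reproduction, which matches the log entries that follow.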
2025/12/31 04:25:19 patched crashed: possible deadlock in hfs_extend_file [need repro = false] 2025/12/31 04:25:20 runner 1 connected 2025/12/31 04:25:23 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:25:25 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:25:26 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:25:28 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:25:30 crash "possible deadlock in hfs_extend_file" is already known 2025/12/31 04:25:30 base crash "possible deadlock in hfs_extend_file" is to be ignored 2025/12/31 04:25:30 patched crashed: possible deadlock in hfs_extend_file [need repro = false] 2025/12/31 04:26:03 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:26:15 runner 6 connected 2025/12/31 04:26:19 runner 7 connected 2025/12/31 04:26:23 runner 2 connected 2025/12/31 04:26:24 runner 4 connected 2025/12/31 04:26:26 runner 3 connected 2025/12/31 04:26:28 runner 5 connected 2025/12/31 04:26:32 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:27:02 runner 1 connected 2025/12/31 04:27:12 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:27:13 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:27:14 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:27:15 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:27:28 runner 2 connected 2025/12/31 04:27:29 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:27:43 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:28:09 runner 5 connected 2025/12/31 04:28:09 crash "INFO: task hung in __iterate_supers" is already known 2025/12/31 04:28:09 base crash "INFO: task hung in __iterate_supers" is to be ignored 2025/12/31 04:28:09 patched crashed: INFO: task hung in __iterate_supers [need repro = false] 2025/12/31 04:28:10 runner 4 connected 2025/12/31 04:28:11 STAT { "buffer too small": 0, "candidate triage jobs": 191, "candidates": 5812, "comps overflows": 0, "corpus": 7330, "corpus [files]": 3225, "corpus [symbols]": 3365, "cover overflows": 1525, "coverage": 98301, "distributor delayed": 13119, "distributor undelayed": 12937, "distributor violated": 220, "exec candidate": 7765, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 12727, "exec total [new]": 36231, "exec triage": 23184, "executor restarts [base]": 215, "executor restarts [new]": 583, "fault jobs": 0, "fuzzer jobs": 191, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 99807, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 7763, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 370, "reproducing": 0, "rpc recv": 4668783424, "rpc sent": 845974184, "signal": 97032, "smash jobs": 0, "triage jobs": 0, "vm output": 21587764, "vm restarts [base]": 24, "vm restarts [new]": 69 } 2025/12/31 04:28:11 runner 0 connected 2025/12/31 04:28:12 runner 6 connected 2025/12/31 04:28:14 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:28:23 base crash: kernel BUG in 
hfs_write_inode 2025/12/31 04:28:27 runner 3 connected 2025/12/31 04:28:41 runner 2 connected 2025/12/31 04:29:07 runner 8 connected 2025/12/31 04:29:13 runner 1 connected 2025/12/31 04:29:20 runner 0 connected 2025/12/31 04:29:24 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:29:24 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:29:24 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:29:28 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:29:28 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:29:28 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:29:57 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:30:04 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:30:04 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:30:04 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:30:16 crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is already known 2025/12/31 04:30:16 base crash "KASAN: slab-out-of-bounds Read in dtSplitPage" is to be ignored 2025/12/31 04:30:16 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:30:30 runner 5 connected 2025/12/31 04:30:33 runner 3 connected 2025/12/31 04:30:41 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 04:30:44 base crash: possible deadlock in run_unpack_ex 2025/12/31 04:30:55 runner 4 connected 2025/12/31 04:31:01 runner 7 connected 2025/12/31 04:31:13 runner 1 connected 2025/12/31 04:31:38 runner 1 connected 2025/12/31 04:31:40 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:31:42 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:31:50 runner 2 connected 2025/12/31 04:31:56 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:32:11 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:32:18 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:32:36 runner 8 connected 2025/12/31 04:32:41 runner 2 connected 2025/12/31 04:32:53 runner 3 connected 2025/12/31 04:33:08 runner 0 connected 2025/12/31 04:33:11 STAT { "buffer too small": 0, "candidate triage jobs": 4, "candidates": 5119, "comps overflows": 0, "corpus": 8148, "corpus [files]": 3516, "corpus [symbols]": 3685, "cover overflows": 1902, "coverage": 100548, "distributor delayed": 14005, "distributor undelayed": 14005, "distributor violated": 256, "exec candidate": 8458, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 17233, "exec total [new]": 49814, "exec triage": 25480, "executor restarts [base]": 262, "executor restarts [new]": 726, "fault jobs": 0, "fuzzer jobs": 4, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 7, "hints jobs": 0, "max signal": 101501, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 
8424, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 275, "reproducing": 0, "rpc recv": 5569680836, "rpc sent": 1132549088, "signal": 99230, "smash jobs": 0, "triage jobs": 0, "vm output": 28044405, "vm restarts [base]": 28, "vm restarts [new]": 83 } 2025/12/31 04:33:17 runner 1 connected 2025/12/31 04:33:39 base crash: INFO: task hung in __iterate_supers 2025/12/31 04:33:39 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 04:33:41 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:33:50 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:34:41 triaged 100.0% of the corpus 2025/12/31 04:34:41 triaged 100.0% of the corpus 2025/12/31 04:34:41 starting bug reproductions 2025/12/31 04:34:41 starting bug reproductions (max 6 VMs, 4 repros) 2025/12/31 04:34:42 runner 1 connected 2025/12/31 04:34:44 runner 0 connected 2025/12/31 04:34:46 runner 2 connected 2025/12/31 04:34:48 runner 8 connected 2025/12/31 04:34:54 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:34:59 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/12/31 04:34:59 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored 2025/12/31 04:34:59 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:35:12 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:35:13 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/12/31 04:35:13 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored 2025/12/31 04:35:13 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:35:27 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/12/31 04:35:27 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored 2025/12/31 04:35:27 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:35:50 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 04:35:52 runner 1 connected 2025/12/31 04:35:55 runner 6 connected 2025/12/31 04:35:58 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:35:59 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:36:11 runner 8 connected 2025/12/31 04:36:11 runner 2 connected 2025/12/31 04:36:24 runner 4 connected 2025/12/31 04:36:29 crash "WARNING in udf_truncate_extents" is already known 2025/12/31 04:36:29 base crash "WARNING in udf_truncate_extents" is to be ignored 2025/12/31 04:36:29 patched crashed: WARNING in udf_truncate_extents [need repro = false] 2025/12/31 04:36:47 runner 1 connected 2025/12/31 04:36:56 runner 2 connected 2025/12/31 04:36:56 runner 0 connected 2025/12/31 04:36:57 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:37:08 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:37:22 patched crashed: INFO: task hung in __iterate_supers [need repro = false] 2025/12/31 04:37:26 runner 3 connected 2025/12/31 04:38:00 runner 6 connected 2025/12/31 04:38:03 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:38:05 runner 0 connected 2025/12/31 04:38:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 13, "corpus": 8367, 
"corpus [files]": 3529, "corpus [symbols]": 3698, "cover overflows": 3672, "coverage": 102986, "distributor delayed": 14308, "distributor undelayed": 14308, "distributor violated": 256, "exec candidate": 13577, "exec collide": 404, "exec fuzz": 723, "exec gen": 32, "exec hints": 97, "exec inject": 0, "exec minimize": 2088, "exec retries": 0, "exec seeds": 360, "exec smash": 497, "exec total [base]": 19675, "exec total [new]": 60138, "exec triage": 26140, "executor restarts [base]": 317, "executor restarts [new]": 921, "fault jobs": 0, "fuzzer jobs": 396, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 7, "hints jobs": 165, "max signal": 105226, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 1734, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 8704, "no exec duration": 35499000000, "no exec requests": 172, "pending": 0, "prog exec time": 485, "reproducing": 0, "rpc recv": 6265886136, "rpc sent": 1485581536, "signal": 100930, "smash jobs": 200, "triage jobs": 31, "vm output": 34244267, "vm restarts [base]": 34, "vm restarts [new]": 93 } 2025/12/31 04:38:20 runner 7 connected 2025/12/31 04:39:00 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:39:04 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:39:08 runner 1 connected 2025/12/31 04:39:13 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:39:28 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:39:36 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:39:57 runner 2 connected 2025/12/31 04:40:01 runner 6 connected 2025/12/31 04:40:12 runner 1 connected 2025/12/31 04:40:26 runner 5 connected 2025/12/31 04:40:33 runner 7 connected 2025/12/31 04:41:07 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:41:15 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:41:25 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 04:41:31 crash "possible deadlock in ocfs2_del_inode_from_orphan" is already known 2025/12/31 04:41:31 base crash "possible deadlock in ocfs2_del_inode_from_orphan" is to be ignored 2025/12/31 04:41:31 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 04:41:52 crash "possible deadlock in ocfs2_del_inode_from_orphan" is already known 2025/12/31 04:41:52 base crash "possible deadlock in ocfs2_del_inode_from_orphan" is to be ignored 2025/12/31 04:41:52 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 04:41:58 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:42:05 base crash: possible deadlock in ocfs2_evict_inode 2025/12/31 04:42:05 runner 0 connected 2025/12/31 04:42:06 crash "possible deadlock in ocfs2_del_inode_from_orphan" is already known 2025/12/31 04:42:06 base crash "possible deadlock in ocfs2_del_inode_from_orphan" is to be ignored 2025/12/31 04:42:06 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 04:42:12 runner 2 connected 2025/12/31 04:42:22 runner 3 connected 2025/12/31 04:42:29 runner 5 connected 2025/12/31 04:42:49 runner 7 
connected 2025/12/31 04:42:55 runner 2 connected 2025/12/31 04:43:03 runner 1 connected 2025/12/31 04:43:05 runner 8 connected 2025/12/31 04:43:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 60, "corpus": 8595, "corpus [files]": 3572, "corpus [symbols]": 3742, "cover overflows": 6194, "coverage": 104747, "distributor delayed": 14585, "distributor undelayed": 14585, "distributor violated": 256, "exec candidate": 13577, "exec collide": 910, "exec fuzz": 1678, "exec gen": 64, "exec hints": 267, "exec inject": 0, "exec minimize": 5105, "exec retries": 0, "exec seeds": 846, "exec smash": 1335, "exec total [base]": 22668, "exec total [new]": 66833, "exec triage": 26818, "executor restarts [base]": 366, "executor restarts [new]": 1098, "fault jobs": 0, "fuzzer jobs": 722, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 8, "hints jobs": 290, "max signal": 107247, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 3929, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 8997, "no exec duration": 35989000000, "no exec requests": 176, "pending": 0, "prog exec time": 468, "reproducing": 0, "rpc recv": 7083327792, "rpc sent": 1869155952, "signal": 102523, "smash jobs": 412, "triage jobs": 20, "vm output": 40100471, "vm restarts [base]": 37, "vm restarts [new]": 105 } 2025/12/31 04:43:18 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 04:43:27 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 04:43:52 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:43:57 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:44:10 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:44:23 runner 0 connected 2025/12/31 04:44:23 runner 2 connected 2025/12/31 04:44:49 base crash: possible deadlock in hfs_find_init 2025/12/31 04:44:50 runner 8 connected 2025/12/31 04:44:53 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:44:53 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:44:54 runner 4 connected 2025/12/31 04:45:08 runner 5 connected 2025/12/31 04:45:21 base crash: possible deadlock in ocfs2_del_inode_from_orphan 2025/12/31 04:45:23 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 04:45:46 runner 1 connected 2025/12/31 04:45:50 runner 1 connected 2025/12/31 04:45:51 runner 3 connected 2025/12/31 04:45:58 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 04:46:19 runner 2 connected 2025/12/31 04:46:27 runner 6 connected 2025/12/31 04:46:39 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:46:40 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 04:47:01 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:47:03 runner 7 connected 2025/12/31 04:47:19 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:47:38 runner 2 connected 2025/12/31 04:47:46 crash "general protection fault in txEnd" is already known 2025/12/31 04:47:46 base crash "general protection fault in txEnd" is to be ignored 2025/12/31 04:47:46 patched crashed: general protection fault in txEnd [need repro = false] 
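The periodic STAT entries are flat JSON objects printed on a single log line, so the run's progress (corpus size, coverage, executions on base vs. patched) can be pulled out of a saved copy of this log with a few lines of Go. The sketch below is an assumption-laden illustration, not part of syzkaller: the file name "fuzz.log" and the choice of counters are hypothetical.

```go
// statscan.go
//
// Hedged sketch: extract a few counters from the "STAT { ... }" lines of a
// saved fuzzing log. Assumes one STAT object per line, as in the log above.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("fuzz.log") // assumed path to a saved copy of this log
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // STAT entries are long single lines
	for sc.Scan() {
		line := sc.Text()
		start := strings.Index(line, "STAT {")
		if start < 0 {
			continue
		}
		// The STAT object is flat JSON, so it ends at the first '}' after '{'.
		end := strings.Index(line[start:], "}")
		if end < 0 {
			continue
		}
		var m map[string]any
		if err := json.Unmarshal([]byte(line[start+5:start+end+1]), &m); err != nil {
			continue
		}
		fmt.Printf("corpus=%v coverage=%v exec[new]=%v exec[base]=%v\n",
			m["corpus"], m["coverage"], m["exec total [new]"], m["exec total [base]"])
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```

Run against the STAT lines shown here, such a scan would show the corpus growing from 1671 to roughly 9200 entries and coverage from about 70k to 108k signals over the first hour of the run.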
2025/12/31 04:47:46 runner 5 connected 2025/12/31 04:48:00 runner 2 connected 2025/12/31 04:48:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 154, "corpus": 8817, "corpus [files]": 3592, "corpus [symbols]": 3768, "cover overflows": 9169, "coverage": 106735, "distributor delayed": 14878, "distributor undelayed": 14878, "distributor violated": 256, "exec candidate": 13577, "exec collide": 1494, "exec fuzz": 2741, "exec gen": 105, "exec hints": 462, "exec inject": 0, "exec minimize": 8086, "exec retries": 0, "exec seeds": 1351, "exec smash": 2326, "exec total [base]": 24790, "exec total [new]": 73837, "exec triage": 27457, "executor restarts [base]": 418, "executor restarts [new]": 1220, "fault jobs": 0, "fuzzer jobs": 1056, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 420, "max signal": 109106, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5953, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9275, "no exec duration": 36603000000, "no exec requests": 178, "pending": 0, "prog exec time": 617, "reproducing": 0, "rpc recv": 7934836744, "rpc sent": 2269195256, "signal": 104183, "smash jobs": 611, "triage jobs": 25, "vm output": 46846804, "vm restarts [base]": 41, "vm restarts [new]": 115 } 2025/12/31 04:48:16 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:48:17 runner 6 connected 2025/12/31 04:48:42 runner 8 connected 2025/12/31 04:48:47 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 04:48:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:49:15 runner 3 connected 2025/12/31 04:49:22 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:49:45 runner 4 connected 2025/12/31 04:49:47 patched crashed: INFO: task hung in path_openat [need repro = true] 2025/12/31 04:49:47 scheduled a reproduction of 'INFO: task hung in path_openat' 2025/12/31 04:49:47 start reproducing 'INFO: task hung in path_openat' 2025/12/31 04:49:57 base crash: possible deadlock in run_unpack_ex 2025/12/31 04:50:01 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:50:04 crash "WARNING in dbAdjTree" is already known 2025/12/31 04:50:04 base crash "WARNING in dbAdjTree" is to be ignored 2025/12/31 04:50:04 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/12/31 04:50:04 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:50:20 runner 0 connected 2025/12/31 04:50:53 runner 2 connected 2025/12/31 04:50:58 runner 1 connected 2025/12/31 04:51:01 runner 6 connected 2025/12/31 04:51:02 runner 8 connected 2025/12/31 04:51:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:51:39 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 04:52:12 base crash: WARNING in dbAdjTree 2025/12/31 04:52:22 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/12/31 04:52:32 runner 4 connected 2025/12/31 04:52:36 runner 7 connected 2025/12/31 04:52:41 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:53:09 runner 2 connected 2025/12/31 04:53:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 233, "corpus": 8928, "corpus 
[files]": 3603, "corpus [symbols]": 3783, "cover overflows": 10874, "coverage": 107074, "distributor delayed": 15087, "distributor undelayed": 15087, "distributor violated": 256, "exec candidate": 13577, "exec collide": 2034, "exec fuzz": 3646, "exec gen": 145, "exec hints": 649, "exec inject": 0, "exec minimize": 9754, "exec retries": 0, "exec seeds": 1761, "exec smash": 3214, "exec total [base]": 26916, "exec total [new]": 78878, "exec triage": 27851, "executor restarts [base]": 481, "executor restarts [new]": 1408, "fault jobs": 0, "fuzzer jobs": 1143, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 430, "max signal": 109894, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 7393, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9446, "no exec duration": 37450000000, "no exec requests": 182, "pending": 0, "prog exec time": 579, "reproducing": 1, "rpc recv": 8564819944, "rpc sent": 2580511696, "signal": 104524, "smash jobs": 696, "triage jobs": 17, "vm output": 52046333, "vm restarts [base]": 45, "vm restarts [new]": 123 } 2025/12/31 04:53:15 base crash: possible deadlock in ocfs2_evict_inode 2025/12/31 04:53:19 runner 6 connected 2025/12/31 04:53:39 runner 3 connected 2025/12/31 04:53:45 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:53:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:53:57 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:54:11 runner 0 connected 2025/12/31 04:54:23 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:54:23 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 04:54:43 runner 4 connected 2025/12/31 04:54:53 runner 7 connected 2025/12/31 04:54:56 runner 8 connected 2025/12/31 04:54:59 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:55:00 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:55:19 runner 6 connected 2025/12/31 04:55:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:55:45 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:55:48 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:55:50 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:55:57 runner 3 connected 2025/12/31 04:55:57 base crash: kernel BUG in hfs_write_inode 2025/12/31 04:55:57 runner 2 connected 2025/12/31 04:56:14 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 04:56:30 runner 4 connected 2025/12/31 04:56:30 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 04:56:33 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:56:36 runner 5 connected 2025/12/31 04:56:44 runner 0 connected 2025/12/31 04:56:48 runner 8 connected 2025/12/31 04:56:56 runner 1 connected 2025/12/31 04:57:02 base crash: kernel BUG in jfs_evict_inode 2025/12/31 04:57:10 runner 6 connected 2025/12/31 04:57:16 patched 
crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:57:27 runner 7 connected 2025/12/31 04:57:28 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 04:57:30 runner 2 connected 2025/12/31 04:58:00 runner 2 connected 2025/12/31 04:58:10 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 04:58:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 309, "corpus": 9075, "corpus [files]": 3618, "corpus [symbols]": 3798, "cover overflows": 12825, "coverage": 107851, "distributor delayed": 15341, "distributor undelayed": 15341, "distributor violated": 256, "exec candidate": 13577, "exec collide": 2534, "exec fuzz": 4515, "exec gen": 191, "exec hints": 838, "exec inject": 0, "exec minimize": 11653, "exec retries": 0, "exec seeds": 2111, "exec smash": 4084, "exec total [base]": 29312, "exec total [new]": 84024, "exec triage": 28260, "executor restarts [base]": 539, "executor restarts [new]": 1497, "fault jobs": 0, "fuzzer jobs": 1313, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 466, "max signal": 110678, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 8670, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9630, "no exec duration": 39427000000, "no exec requests": 188, "pending": 0, "prog exec time": 623, "reproducing": 1, "rpc recv": 9463602320, "rpc sent": 2907484656, "signal": 105846, "smash jobs": 827, "triage jobs": 20, "vm output": 56521230, "vm restarts [base]": 49, "vm restarts [new]": 137 } 2025/12/31 04:58:12 runner 8 connected 2025/12/31 04:58:18 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:58:21 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 04:58:51 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 04:59:00 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 04:59:02 crash "possible deadlock in hfs_extend_file" is already known 2025/12/31 04:59:02 base crash "possible deadlock in hfs_extend_file" is to be ignored 2025/12/31 04:59:02 patched crashed: possible deadlock in hfs_extend_file [need repro = false] 2025/12/31 04:59:07 runner 4 connected 2025/12/31 04:59:16 runner 1 connected 2025/12/31 04:59:19 runner 0 connected 2025/12/31 04:59:49 runner 8 connected 2025/12/31 04:59:58 runner 5 connected 2025/12/31 05:00:00 runner 3 connected 2025/12/31 05:00:06 crash "possible deadlock in hfs_extend_file" is already known 2025/12/31 05:00:06 base crash "possible deadlock in hfs_extend_file" is to be ignored 2025/12/31 05:00:06 patched crashed: possible deadlock in hfs_extend_file [need repro = false] 2025/12/31 05:00:15 crash "WARNING in hfs_bnode_create" is already known 2025/12/31 05:00:15 base crash "WARNING in hfs_bnode_create" is to be ignored 2025/12/31 05:00:15 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 05:00:18 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:00:32 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 05:00:32 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 05:00:32 
patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 05:00:33 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:00:36 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 05:00:36 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 05:00:36 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 05:00:39 base crash: WARNING in hfs_bnode_create 2025/12/31 05:00:53 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 05:01:04 runner 6 connected 2025/12/31 05:01:12 runner 4 connected 2025/12/31 05:01:15 runner 2 connected 2025/12/31 05:01:28 runner 8 connected 2025/12/31 05:01:34 runner 5 connected 2025/12/31 05:01:37 runner 1 connected 2025/12/31 05:01:52 runner 2 connected 2025/12/31 05:01:53 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:02:10 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:02:25 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:02:46 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:02:50 runner 4 connected 2025/12/31 05:02:54 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:03:08 runner 2 connected 2025/12/31 05:03:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 356, "corpus": 9201, "corpus [files]": 3639, "corpus [symbols]": 3821, "cover overflows": 14898, "coverage": 108304, "distributor delayed": 15568, "distributor undelayed": 15568, "distributor violated": 256, "exec candidate": 13577, "exec collide": 2992, "exec fuzz": 5304, "exec gen": 233, "exec hints": 1005, "exec inject": 0, "exec minimize": 13655, "exec retries": 0, "exec seeds": 2410, "exec smash": 4912, "exec total [base]": 31345, "exec total [new]": 89013, "exec triage": 28656, "executor restarts [base]": 573, "executor restarts [new]": 1588, "fault jobs": 0, "fuzzer jobs": 1486, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 529, "max signal": 111285, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10195, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9803, "no exec duration": 39427000000, "no exec requests": 188, "pending": 0, "prog exec time": 699, "reproducing": 1, "rpc recv": 10209587864, "rpc sent": 3247205744, "signal": 106300, "smash jobs": 942, "triage jobs": 15, "vm output": 62967769, "vm restarts [base]": 54, "vm restarts [new]": 148 } 2025/12/31 05:03:22 runner 1 connected 2025/12/31 05:03:38 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:03:41 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:03:43 runner 5 connected 2025/12/31 05:03:51 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:03:52 runner 3 connected 2025/12/31 05:04:11 patched crashed: possible 
deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:04:40 runner 4 connected 2025/12/31 05:04:40 base crash: kernel BUG in txUnlock 2025/12/31 05:04:41 fuzzer has reached the modified code (3827 + 3645 + 0), continuing fuzzing 2025/12/31 05:04:49 runner 2 connected 2025/12/31 05:05:08 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:05:10 runner 5 connected 2025/12/31 05:05:38 runner 0 connected 2025/12/31 05:06:10 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:06:13 runner 8 connected 2025/12/31 05:06:32 crash "possible deadlock in ntfs_look_for_free_space" is already known 2025/12/31 05:06:32 base crash "possible deadlock in ntfs_look_for_free_space" is to be ignored 2025/12/31 05:06:32 patched crashed: possible deadlock in ntfs_look_for_free_space [need repro = false] 2025/12/31 05:06:41 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:06:43 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:06:58 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:07:04 patched crashed: INFO: task hung in blkdev_write_iter [need repro = true] 2025/12/31 05:07:04 scheduled a reproduction of 'INFO: task hung in blkdev_write_iter' 2025/12/31 05:07:04 start reproducing 'INFO: task hung in blkdev_write_iter' 2025/12/31 05:07:08 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:07:10 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:07:20 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:07:28 runner 5 connected 2025/12/31 05:07:38 runner 0 connected 2025/12/31 05:07:55 runner 1 connected 2025/12/31 05:08:02 runner 6 connected 2025/12/31 05:08:04 runner 2 connected 2025/12/31 05:08:08 runner 7 connected 2025/12/31 05:08:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 412, "corpus": 9339, "corpus [files]": 3658, "corpus [symbols]": 3843, "cover overflows": 16763, "coverage": 108934, "distributor delayed": 15819, "distributor undelayed": 15816, "distributor violated": 256, "exec candidate": 13577, "exec collide": 3504, "exec fuzz": 6288, "exec gen": 293, "exec hints": 1195, "exec inject": 0, "exec minimize": 15544, "exec retries": 0, "exec seeds": 2768, "exec smash": 5920, "exec total [base]": 33393, "exec total [new]": 94450, "exec triage": 29090, "executor restarts [base]": 615, "executor restarts [new]": 1689, "fault jobs": 0, "fuzzer jobs": 1655, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 571, "max signal": 111972, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 11560, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9995, "no exec duration": 39427000000, "no exec requests": 188, "pending": 0, "prog exec time": 544, "reproducing": 2, "rpc recv": 10908959644, "rpc sent": 3571033624, "signal": 106824, "smash jobs": 1061, "triage jobs": 23, "vm output": 67636097, "vm restarts [base]": 60, "vm restarts [new]": 156 } 2025/12/31 05:08:17 runner 4 connected 2025/12/31 05:08:47 patched crashed: possible deadlock in 
ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:08:51 crash "kernel BUG in ext4_do_writepages" is already known 2025/12/31 05:08:51 base crash "kernel BUG in ext4_do_writepages" is to be ignored 2025/12/31 05:08:51 patched crashed: kernel BUG in ext4_do_writepages [need repro = false] 2025/12/31 05:08:52 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:08:53 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 05:08:53 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 05:08:53 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 05:09:01 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:09:23 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 05:09:23 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 05:09:23 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 05:09:45 runner 6 connected 2025/12/31 05:09:48 runner 7 connected 2025/12/31 05:09:48 runner 8 connected 2025/12/31 05:09:50 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:09:51 runner 4 connected 2025/12/31 05:09:59 runner 3 connected 2025/12/31 05:10:05 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:10:08 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:10:12 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:10:18 base crash: possible deadlock in ocfs2_evict_inode 2025/12/31 05:10:21 runner 5 connected 2025/12/31 05:10:39 crash "UBSAN: array-index-out-of-bounds in dtSplitPage" is already known 2025/12/31 05:10:39 base crash "UBSAN: array-index-out-of-bounds in dtSplitPage" is to be ignored 2025/12/31 05:10:39 patched crashed: UBSAN: array-index-out-of-bounds in dtSplitPage [need repro = false] 2025/12/31 05:10:55 crash "UBSAN: array-index-out-of-bounds in dtSplitPage" is already known 2025/12/31 05:10:55 base crash "UBSAN: array-index-out-of-bounds in dtSplitPage" is to be ignored 2025/12/31 05:10:55 patched crashed: UBSAN: array-index-out-of-bounds in dtSplitPage [need repro = false] 2025/12/31 05:11:01 runner 1 connected 2025/12/31 05:11:05 runner 2 connected 2025/12/31 05:11:09 runner 6 connected 2025/12/31 05:11:15 runner 0 connected 2025/12/31 05:11:20 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:11:27 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:11:36 runner 3 connected 2025/12/31 05:11:54 runner 7 connected 2025/12/31 05:12:02 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:12:09 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:12:24 runner 1 connected 2025/12/31 05:12:49 crash "possible deadlock in ntfs_look_for_free_space" is already known 2025/12/31 05:12:49 base crash "possible deadlock in ntfs_look_for_free_space" is to be ignored 2025/12/31 05:12:49 patched crashed: possible deadlock in ntfs_look_for_free_space [need repro = false] 
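Most of the traffic in this stretch is crash triage for the two kernels, reported in the fixed formats "base crash: <title>" and "patched crashed: <title> [need repro = ...]". To compare which titles show up on both sides, a minimal Go sketch that tallies them from a saved copy of this log is below; the file name and the counting logic are illustrative and are not part of the fuzzer itself.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("fuzz.log") // illustrative file name: a saved copy of this log
	if err != nil {
		panic(err)
	}
	defer f.Close()

	base := map[string]int{}
	patched := map[string]int{}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		if _, title, ok := strings.Cut(line, "base crash: "); ok {
			base[title]++
		} else if _, rest, ok := strings.Cut(line, "patched crashed: "); ok {
			// Strip the trailing "[need repro = ...]" marker before counting.
			title, _, _ := strings.Cut(rest, " [need repro")
			patched[title]++
		}
	}
	for title, n := range patched {
		fmt.Printf("patched %3dx, base %3dx: %s\n", n, base[title], title)
	}
}

Titles that occur with comparable frequency on both kernels (the ocfs2 deadlocks and the hfs/jfs BUGs above) are likely pre-existing rather than introduced by the patch, which matches the "is already known / is to be ignored" handling the log itself reports.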
2025/12/31 05:13:00 runner 2 connected 2025/12/31 05:13:07 runner 4 connected 2025/12/31 05:13:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 465, "corpus": 9425, "corpus [files]": 3668, "corpus [symbols]": 3860, "cover overflows": 18474, "coverage": 109252, "distributor delayed": 15982, "distributor undelayed": 15979, "distributor violated": 256, "exec candidate": 13577, "exec collide": 3925, "exec fuzz": 7179, "exec gen": 344, "exec hints": 1358, "exec inject": 0, "exec minimize": 16801, "exec retries": 0, "exec seeds": 3099, "exec smash": 6791, "exec total [base]": 35659, "exec total [new]": 98735, "exec triage": 29380, "executor restarts [base]": 656, "executor restarts [new]": 1791, "fault jobs": 0, "fuzzer jobs": 1706, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 565, "max signal": 112339, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 12411, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10118, "no exec duration": 39908000000, "no exec requests": 190, "pending": 0, "prog exec time": 552, "reproducing": 2, "rpc recv": 11728078808, "rpc sent": 3860628232, "signal": 107671, "smash jobs": 1125, "triage jobs": 16, "vm output": 71369572, "vm restarts [base]": 65, "vm restarts [new]": 167 } 2025/12/31 05:13:34 base crash: possible deadlock in hfs_extend_file 2025/12/31 05:13:37 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:13:49 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:13:55 runner 6 connected 2025/12/31 05:13:59 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:14:31 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:14:32 runner 0 connected 2025/12/31 05:14:34 crash "possible deadlock in attr_data_get_block" is already known 2025/12/31 05:14:34 base crash "possible deadlock in attr_data_get_block" is to be ignored 2025/12/31 05:14:34 patched crashed: possible deadlock in attr_data_get_block [need repro = false] 2025/12/31 05:14:36 runner 8 connected 2025/12/31 05:14:58 runner 5 connected 2025/12/31 05:15:25 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:15:29 runner 1 connected 2025/12/31 05:15:32 runner 3 connected 2025/12/31 05:15:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:16:16 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:16:20 crash "kernel BUG in dbFindLeaf" is already known 2025/12/31 05:16:20 base crash "kernel BUG in dbFindLeaf" is to be ignored 2025/12/31 05:16:20 patched crashed: kernel BUG in dbFindLeaf [need repro = false] 2025/12/31 05:16:22 runner 6 connected 2025/12/31 05:16:25 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:16:30 runner 5 connected 2025/12/31 05:17:01 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 
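The periodic STAT { ... } entries are JSON objects whose values are all numbers (coverage, corpus size, exec totals, VM restarts, and so on), so they can be turned into time series directly. The following is a small Go sketch under the assumption that each payload occupies a single log line after the " STAT " marker; the key names are copied from the entries above, everything else is illustrative.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("fuzz.log") // illustrative file name
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// Entries look like: 2025/12/31 05:13:11 STAT { "buffer too small": 0, ... }
		ts, payload, ok := strings.Cut(sc.Text(), " STAT ")
		if !ok {
			continue
		}
		stats := map[string]float64{} // every value in the observed entries is numeric
		if err := json.Unmarshal([]byte(payload), &stats); err != nil {
			continue // tolerate truncated or re-wrapped entries
		}
		fmt.Printf("%s coverage=%.0f corpus=%.0f exec[base]=%.0f exec[new]=%.0f\n",
			ts, stats["coverage"], stats["corpus"],
			stats["exec total [base]"], stats["exec total [new]"])
	}
}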
2025/12/31 05:17:04 patched crashed: INFO: task hung in f2fs_balance_fs [need repro = true] 2025/12/31 05:17:04 scheduled a reproduction of 'INFO: task hung in f2fs_balance_fs' 2025/12/31 05:17:04 start reproducing 'INFO: task hung in f2fs_balance_fs' 2025/12/31 05:17:12 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:17:24 runner 2 connected 2025/12/31 05:17:48 crash "kernel BUG in txAbort" is already known 2025/12/31 05:17:48 base crash "kernel BUG in txAbort" is to be ignored 2025/12/31 05:17:48 patched crashed: kernel BUG in txAbort [need repro = false] 2025/12/31 05:17:52 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:17:55 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:17:59 runner 6 connected 2025/12/31 05:18:02 runner 4 connected 2025/12/31 05:18:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 530, "corpus": 9507, "corpus [files]": 3676, "corpus [symbols]": 3871, "cover overflows": 20189, "coverage": 109563, "distributor delayed": 16149, "distributor undelayed": 16149, "distributor violated": 256, "exec candidate": 13577, "exec collide": 4428, "exec fuzz": 8147, "exec gen": 397, "exec hints": 1545, "exec inject": 0, "exec minimize": 18015, "exec retries": 0, "exec seeds": 3419, "exec smash": 7806, "exec total [base]": 38198, "exec total [new]": 103246, "exec triage": 29625, "executor restarts [base]": 717, "executor restarts [new]": 1891, "fault jobs": 0, "fuzzer jobs": 1740, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 4, "hints jobs": 543, "max signal": 112728, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 13266, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10224, "no exec duration": 41432000000, "no exec requests": 196, "pending": 0, "prog exec time": 563, "reproducing": 3, "rpc recv": 12396013072, "rpc sent": 4178687248, "signal": 108240, "smash jobs": 1186, "triage jobs": 11, "vm output": 75196583, "vm restarts [base]": 68, "vm restarts [new]": 175 } 2025/12/31 05:18:39 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/12/31 05:18:46 runner 5 connected 2025/12/31 05:18:46 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:18:48 runner 0 connected 2025/12/31 05:18:52 runner 1 connected 2025/12/31 05:19:13 crash "UBSAN: array-index-out-of-bounds in dtInsertEntry" is already known 2025/12/31 05:19:13 base crash "UBSAN: array-index-out-of-bounds in dtInsertEntry" is to be ignored 2025/12/31 05:19:13 patched crashed: UBSAN: array-index-out-of-bounds in dtInsertEntry [need repro = false] 2025/12/31 05:19:16 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:19:35 runner 8 connected 2025/12/31 05:19:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:20:03 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 05:20:10 runner 6 connected 2025/12/31 05:20:15 runner 2 connected 2025/12/31 05:20:17 reproducing crash 'INFO: task 
hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:20:28 base crash: kernel BUG in dbFindLeaf 2025/12/31 05:20:44 runner 5 connected 2025/12/31 05:20:49 base crash: UBSAN: array-index-out-of-bounds in dtSplitPage 2025/12/31 05:21:02 runner 4 connected 2025/12/31 05:21:12 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:21:23 crash "general protection fault in txEnd" is already known 2025/12/31 05:21:23 base crash "general protection fault in txEnd" is to be ignored 2025/12/31 05:21:23 patched crashed: general protection fault in txEnd [need repro = false] 2025/12/31 05:21:26 runner 0 connected 2025/12/31 05:21:48 runner 2 connected 2025/12/31 05:21:53 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:22:09 runner 7 connected 2025/12/31 05:22:16 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 05:22:21 runner 5 connected 2025/12/31 05:22:51 runner 4 connected 2025/12/31 05:22:52 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:23:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 571, "corpus": 9563, "corpus [files]": 3682, "corpus [symbols]": 3877, "cover overflows": 21307, "coverage": 109732, "distributor delayed": 16284, "distributor undelayed": 16282, "distributor violated": 256, "exec candidate": 13577, "exec collide": 4772, "exec fuzz": 8856, "exec gen": 436, "exec hints": 1672, "exec inject": 0, "exec minimize": 18843, "exec retries": 0, "exec seeds": 3645, "exec smash": 8546, "exec total [base]": 40431, "exec total [new]": 106463, "exec triage": 29822, "executor restarts [base]": 777, "executor restarts [new]": 1984, "fault jobs": 0, "fuzzer jobs": 1756, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 527, "max signal": 113008, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 13845, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10308, "no exec duration": 42354000000, "no exec requests": 199, "pending": 0, "prog exec time": 572, "reproducing": 3, "rpc recv": 13058912512, "rpc sent": 4447564736, "signal": 108397, "smash jobs": 1218, "triage jobs": 11, "vm output": 79718241, "vm restarts [base]": 73, "vm restarts [new]": 183 } 2025/12/31 05:23:23 runner 6 connected 2025/12/31 05:23:35 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:23:40 patched crashed: BUG: spinlock bad magic in corrupted [need repro = true] 2025/12/31 05:23:40 scheduled a reproduction of 'BUG: spinlock bad magic in corrupted' 2025/12/31 05:23:40 start reproducing 'BUG: spinlock bad magic in corrupted' 2025/12/31 05:23:52 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl 
--git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:24:02 base crash: possible deadlock in ocfs2_del_inode_from_orphan 2025/12/31 05:24:09 base crash: WARNING in dbAdjTree 2025/12/31 05:24:17 base crash: possible deadlock in run_unpack_ex 2025/12/31 05:24:42 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 05:24:49 crash "possible deadlock in ntfs_fiemap" is already known 2025/12/31 05:24:49 base crash "possible deadlock in ntfs_fiemap" is to be ignored 2025/12/31 05:24:49 patched crashed: possible deadlock in ntfs_fiemap [need repro = false] 2025/12/31 05:24:58 runner 0 connected 2025/12/31 05:25:06 runner 2 connected 2025/12/31 05:25:12 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:25:14 runner 1 connected 2025/12/31 05:25:38 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:25:39 runner 7 connected 2025/12/31 05:25:46 runner 8 connected 2025/12/31 05:25:57 base crash: WARNING in hfs_bnode_create 2025/12/31 05:26:03 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:26:18 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:26:35 runner 0 connected 2025/12/31 05:26:54 runner 1 connected 2025/12/31 05:27:01 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:27:01 runner 6 connected 2025/12/31 05:27:12 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:27:15 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:27:19 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:27:37 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 05:28:02 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:28:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 584, "corpus": 9594, "corpus [files]": 3686, "corpus [symbols]": 3881, "cover overflows": 22401, "coverage": 109833, "distributor delayed": 16383, "distributor undelayed": 16373, "distributor violated": 260, "exec candidate": 13577, "exec collide": 5010, "exec fuzz": 9321, "exec gen": 464, "exec hints": 1777, "exec inject": 0, "exec minimize": 19501, "exec retries": 0, "exec seeds": 3759, "exec smash": 9055, "exec total [base]": 42125, "exec total [new]": 108700, "exec triage": 29943, "executor restarts [base]": 825, "executor restarts [new]": 2053, "fault jobs": 0, "fuzzer jobs": 1776, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 
2, "hints jobs": 520, "max signal": 113163, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 14398, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10364, "no exec duration": 42354000000, "no exec requests": 199, "pending": 0, "prog exec time": 660, "reproducing": 4, "rpc recv": 13518750700, "rpc sent": 4637438264, "signal": 108497, "smash jobs": 1238, "triage jobs": 18, "vm output": 83746807, "vm restarts [base]": 78, "vm restarts [new]": 187 } 2025/12/31 05:28:12 runner 2 connected 2025/12/31 05:28:22 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:28:35 runner 8 connected 2025/12/31 05:28:55 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:28:59 runner 0 connected 2025/12/31 05:29:23 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:29:45 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:29:47 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:29:57 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:30:01 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:30:11 repro finished 'INFO: task hung in f2fs_balance_fs', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/12/31 05:30:11 failed repro for "INFO: task hung in f2fs_balance_fs", err=%!s() 2025/12/31 05:30:11 "INFO: task hung in f2fs_balance_fs": saved crash log into 1767159011.crash.log 2025/12/31 05:30:11 "INFO: task hung in f2fs_balance_fs": saved repro log into 1767159011.repro.log 2025/12/31 05:30:18 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:30:21 runner 6 connected 2025/12/31 05:30:28 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:30:37 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:30:42 runner 8 connected 2025/12/31 05:30:45 runner 1 connected 2025/12/31 05:30:54 runner 7 connected 2025/12/31 05:30:56 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:31:12 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:31:15 runner 1 
connected 2025/12/31 05:31:20 runner 0 connected 2025/12/31 05:31:30 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:31:40 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:31:46 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:31:53 runner 0 connected 2025/12/31 05:32:02 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:32:07 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:32:09 runner 8 connected 2025/12/31 05:32:11 base crash: kernel BUG in ext4_do_writepages 2025/12/31 05:32:12 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:32:28 runner 6 connected 2025/12/31 05:32:43 runner 1 connected 2025/12/31 05:32:59 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:33:04 runner 7 connected 2025/12/31 05:33:09 runner 2 connected 2025/12/31 05:33:09 runner 1 connected 2025/12/31 05:33:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 603, "corpus": 9642, "corpus [files]": 3690, "corpus [symbols]": 3885, "cover overflows": 23253, "coverage": 110019, "distributor delayed": 16517, "distributor undelayed": 16515, "distributor violated": 260, "exec candidate": 13577, "exec collide": 5334, "exec fuzz": 9879, "exec gen": 499, "exec hints": 1893, "exec inject": 0, "exec minimize": 20101, "exec retries": 0, "exec seeds": 3918, "exec smash": 9702, "exec total [base]": 44281, "exec total [new]": 111324, "exec triage": 30118, "executor restarts [base]": 882, "executor restarts [new]": 2115, "fault jobs": 0, "fuzzer jobs": 1789, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 3, "hints jobs": 504, "max signal": 113378, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 14829, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10431, "no exec duration": 42394000000, "no exec requests": 200, "pending": 0, "prog exec time": 642, "reproducing": 3, "rpc recv": 14157147824, "rpc sent": 4854670952, "signal": 108665, "smash jobs": 1273, "triage jobs": 12, "vm output": 86480775, "vm restarts [base]": 84, "vm restarts [new]": 197 } 2025/12/31 05:33:18 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:33:31 base crash: general protection fault in txEnd 2025/12/31 05:33:34 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:33:35 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:33:47 patched crashed: kernel BUG 
in hfs_write_inode [need repro = false] 2025/12/31 05:33:58 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:34:15 runner 0 connected 2025/12/31 05:34:29 runner 0 connected 2025/12/31 05:34:34 runner 1 connected 2025/12/31 05:34:36 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:34:46 runner 6 connected 2025/12/31 05:34:52 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:35:15 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:35:35 runner 7 connected 2025/12/31 05:36:13 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:36:18 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:36:20 runner 8 connected 2025/12/31 05:36:43 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:37:15 runner 6 connected 2025/12/31 05:37:22 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:37:34 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:37:42 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:37:47 runner 2 connected 2025/12/31 05:37:53 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:37:55 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:38:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 637, "corpus": 9706, "corpus [files]": 3703, "corpus [symbols]": 3900, "cover overflows": 25074, "coverage": 110178, "distributor delayed": 16659, "distributor undelayed": 16655, "distributor violated": 260, "exec candidate": 13577, "exec collide": 5827, "exec fuzz": 10881, "exec gen": 551, "exec hints": 2110, "exec inject": 0, "exec minimize": 21105, "exec retries": 0, "exec seeds": 4146, "exec smash": 10803, "exec total [base]": 46730, "exec total [new]": 115635, "exec triage": 30335, "executor restarts [base]": 932, "executor restarts [new]": 2190, "fault jobs": 0, "fuzzer jobs": 1807, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 483, "max signal": 113748, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 15473, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10523, "no exec duration": 45559000000, "no exec requests": 208, "pending": 0, "prog exec time": 
546, "reproducing": 3, "rpc recv": 14774024184, "rpc sent": 5184009336, "signal": 108804, "smash jobs": 1312, "triage jobs": 12, "vm output": 90473932, "vm restarts [base]": 87, "vm restarts [new]": 202 } 2025/12/31 05:38:20 runner 7 connected 2025/12/31 05:38:31 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:38:38 runner 8 connected 2025/12/31 05:38:54 runner 0 connected 2025/12/31 05:39:14 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:39:28 runner 2 connected 2025/12/31 05:39:29 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:39:56 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:40:07 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:40:11 runner 8 connected 2025/12/31 05:40:25 runner 0 connected 2025/12/31 05:40:55 runner 6 connected 2025/12/31 05:41:03 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:41:06 runner 0 connected 2025/12/31 05:41:13 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:41:19 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:41:33 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:41:42 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:41:49 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:41:59 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:41:59 runner 1 connected 2025/12/31 05:42:08 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:42:15 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:42:16 runner 0 connected 2025/12/31 05:42:23 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 05:42:23 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 05:42:23 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 05:42:29 runner 0 connected 2025/12/31 05:42:40 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:42:40 runner 7 connected 2025/12/31 05:42:46 runner 8 connected 2025/12/31 05:42:58 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:43:04 runner 1 connected 2025/12/31 05:43:05 base crash: possible deadlock in ocfs2_del_inode_from_orphan 2025/12/31 05:43:06 runner 6 connected 2025/12/31 05:43:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 667, "corpus": 9754, "corpus [files]": 3704, "corpus [symbols]": 3901, "cover overflows": 26056, "coverage": 110325, "distributor delayed": 16748, "distributor undelayed": 16747, "distributor violated": 260, "exec candidate": 13577, "exec collide": 6181, "exec fuzz": 11511, 
"exec gen": 596, "exec hints": 2253, "exec inject": 0, "exec minimize": 21790, "exec retries": 0, "exec seeds": 4281, "exec smash": 11551, "exec total [base]": 48669, "exec total [new]": 118509, "exec triage": 30460, "executor restarts [base]": 987, "executor restarts [new]": 2287, "fault jobs": 0, "fuzzer jobs": 1823, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 2, "hints jobs": 484, "max signal": 113906, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 16003, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10579, "no exec duration": 45559000000, "no exec requests": 208, "pending": 0, "prog exec time": 533, "reproducing": 3, "rpc recv": 15401831208, "rpc sent": 5414282968, "signal": 108942, "smash jobs": 1334, "triage jobs": 5, "vm output": 93675422, "vm restarts [base]": 92, "vm restarts [new]": 212 } 2025/12/31 05:43:22 runner 1 connected 2025/12/31 05:43:28 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:43:28 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:43:30 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:43:31 crash "general protection fault in lmLogSync" is already known 2025/12/31 05:43:31 base crash "general protection fault in lmLogSync" is to be ignored 2025/12/31 05:43:31 patched crashed: general protection fault in lmLogSync [need repro = false] 2025/12/31 05:43:55 runner 0 connected 2025/12/31 05:44:01 runner 2 connected 2025/12/31 05:44:06 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:44:11 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:44:18 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:44:21 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:44:26 runner 0 connected 2025/12/31 05:44:27 runner 1 connected 2025/12/31 05:44:30 runner 6 connected 2025/12/31 05:44:35 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:44:49 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:45:07 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:45:12 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:45:16 runner 1 connected 2025/12/31 05:45:17 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:45:19 runner 0 connected 2025/12/31 05:45:33 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec 
scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:45:33 runner 2 connected 2025/12/31 05:45:35 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:45:46 runner 0 connected 2025/12/31 05:46:08 runner 1 connected 2025/12/31 05:46:13 runner 7 connected 2025/12/31 05:46:34 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 05:47:01 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:47:02 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:47:04 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:47:09 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:47:22 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 05:47:31 runner 6 connected 2025/12/31 05:47:59 runner 0 connected 2025/12/31 05:47:59 runner 7 connected 2025/12/31 05:48:07 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:48:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 704, "corpus": 9819, "corpus [files]": 3712, "corpus [symbols]": 3912, "cover overflows": 27596, "coverage": 110477, "distributor delayed": 16893, "distributor undelayed": 16893, "distributor violated": 260, "exec candidate": 13577, "exec collide": 6704, "exec fuzz": 12464, "exec gen": 642, "exec hints": 2461, "exec inject": 0, "exec minimize": 22737, "exec retries": 0, "exec seeds": 4491, "exec smash": 12656, "exec total [base]": 50014, "exec total [new]": 122717, "exec triage": 30668, "executor restarts [base]": 1042, "executor restarts [new]": 2360, "fault jobs": 0, "fuzzer jobs": 1875, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 486, "max signal": 114334, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 16657, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10673, "no exec duration": 46583000000, "no exec requests": 211, "pending": 0, "prog exec time": 542, "reproducing": 3, "rpc recv": 16075902424, "rpc sent": 5675754552, "signal": 109090, "smash jobs": 1378, "triage jobs": 11, "vm output": 97358111, "vm restarts [base]": 99, "vm restarts [new]": 220 } 2025/12/31 05:48:12 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:48:19 runner 8 connected 2025/12/31 05:48:27 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:48:38 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to 
start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:48:49 base crash: INFO: task hung in __iterate_supers 2025/12/31 05:48:56 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:49:11 runner 0 connected 2025/12/31 05:49:25 runner 7 connected 2025/12/31 05:49:26 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 05:49:27 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:49:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:49:45 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:49:46 runner 2 connected 2025/12/31 05:50:02 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:50:04 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 05:50:23 runner 6 connected 2025/12/31 05:50:24 runner 0 connected 2025/12/31 05:50:31 runner 0 connected 2025/12/31 05:50:43 runner 1 connected 2025/12/31 05:50:54 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 05:51:03 runner 8 connected 2025/12/31 05:51:11 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:51:17 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:51:33 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:51:51 runner 6 connected 2025/12/31 05:52:08 runner 0 connected 2025/12/31 05:52:15 runner 7 connected 2025/12/31 05:52:20 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:52:27 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:52:29 base crash: INFO: task hung in __iterate_supers 2025/12/31 05:52:37 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:52:55 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:53:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 731, "corpus": 9849, "corpus [files]": 3714, "corpus [symbols]": 3914, "cover overflows": 28494, "coverage": 110560, "distributor delayed": 16981, "distributor undelayed": 16980, "distributor violated": 260, "exec candidate": 13577, "exec collide": 7083, "exec fuzz": 13139, "exec gen": 675, "exec hints": 2598, "exec inject": 0, "exec minimize": 23300, "exec retries": 0, "exec seeds": 4630, "exec smash": 13467, "exec total [base]": 52192, "exec total [new]": 125567, "exec triage": 30782, "executor restarts [base]": 1091, "executor restarts [new]": 2462, "fault jobs": 
0, "fuzzer jobs": 1842, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 3, "hints jobs": 456, "max signal": 114439, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 17129, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10725, "no exec duration": 46583000000, "no exec requests": 211, "pending": 0, "prog exec time": 565, "reproducing": 3, "rpc recv": 16660969332, "rpc sent": 5918110408, "signal": 109169, "smash jobs": 1375, "triage jobs": 11, "vm output": 100770645, "vm restarts [base]": 102, "vm restarts [new]": 229 } 2025/12/31 05:53:17 runner 1 connected 2025/12/31 05:53:24 runner 0 connected 2025/12/31 05:53:26 runner 1 connected 2025/12/31 05:53:43 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:53:54 runner 2 connected 2025/12/31 05:54:06 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:54:14 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:54:40 runner 8 connected 2025/12/31 05:54:51 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:54:57 base crash: kernel BUG in hfs_write_inode 2025/12/31 05:55:03 runner 6 connected 2025/12/31 05:55:35 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 05:55:38 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/12/31 05:55:42 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 05:55:42 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:55:49 runner 0 connected 2025/12/31 05:55:55 runner 1 connected 2025/12/31 05:55:55 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:56:07 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:56:13 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_txnmgr.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:56:27 base crash: kernel BUG in txUnlock 2025/12/31 05:56:31 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:56:31 runner 1 connected 2025/12/31 05:56:35 runner 8 connected 2025/12/31 05:56:38 runner 6 connected 2025/12/31 05:56:40 runner 0 connected 2025/12/31 05:56:52 runner 7 connected 2025/12/31 05:57:01 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 05:57:22 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:57:26 runner 1 connected 2025/12/31 05:57:33 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 05:57:42 reproducing crash 'BUG: spinlock bad magic in corrupted': failed to symbolize report: failed to start 
scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_metapage.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:57:42 repro finished 'BUG: spinlock bad magic in corrupted', repro=true crepro=false desc='BUG: spinlock bad magic in release_metapage' hub=false from_dashboard=false 2025/12/31 05:57:42 found repro for "BUG: spinlock bad magic in release_metapage" (orig title: "BUG: spinlock bad magic in corrupted", reliability: 1), took 33.36 minutes 2025/12/31 05:57:42 "BUG: spinlock bad magic in release_metapage": saved crash log into 1767160662.crash.log 2025/12/31 05:57:42 "BUG: spinlock bad magic in release_metapage": saved repro log into 1767160662.repro.log 2025/12/31 05:57:43 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:57:50 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:57:57 runner 0 connected 2025/12/31 05:58:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 750, "corpus": 9899, "corpus [files]": 3724, "corpus [symbols]": 3925, "cover overflows": 29636, "coverage": 110702, "distributor delayed": 17076, "distributor undelayed": 17075, "distributor violated": 260, "exec candidate": 13577, "exec collide": 7501, "exec fuzz": 13885, "exec gen": 713, "exec hints": 2774, "exec inject": 0, "exec minimize": 24002, "exec retries": 0, "exec seeds": 4764, "exec smash": 14357, "exec total [base]": 53926, "exec total [new]": 128816, "exec triage": 30922, "executor restarts [base]": 1151, "executor restarts [new]": 2541, "fault jobs": 0, "fuzzer jobs": 1865, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 3, "hints jobs": 456, "max signal": 114662, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 17658, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10790, "no exec duration": 47019000000, "no exec requests": 214, "pending": 0, "prog exec time": 527, "reproducing": 2, "rpc recv": 17309840840, "rpc sent": 6154939440, "signal": 109303, "smash jobs": 1402, "triage jobs": 7, "vm output": 104427128, "vm restarts [base]": 107, "vm restarts [new]": 239 } 2025/12/31 05:58:19 runner 2 connected 2025/12/31 05:58:24 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 05:58:26 base crash: kernel BUG in jfs_evict_inode 2025/12/31 05:58:39 runner 6 connected 2025/12/31 05:58:48 runner 1 connected 2025/12/31 05:58:49 runner 2 connected 2025/12/31 05:58:58 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 05:59:00 attempt #0 to run "BUG: spinlock bad magic in release_metapage" on base: aborting due to context cancelation 2025/12/31 05:59:21 runner 8 connected 2025/12/31 05:59:22 runner 1 connected 2025/12/31 05:59:32 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 05:59:41 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 05:59:56 runner 7 connected 2025/12/31 05:59:57 runner 0 connected 2025/12/31 06:00:10 reproducing crash 'INFO: task hung in blkdev_write_iter': failed to symbolize report: failed to start scripts/get_maintainer.pl 
[scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:00:10 repro finished 'INFO: task hung in blkdev_write_iter', repro=true crepro=false desc='kernel BUG in jfs_evict_inode' hub=false from_dashboard=false 2025/12/31 06:00:10 found repro for "kernel BUG in jfs_evict_inode" (orig title: "INFO: task hung in blkdev_write_iter", reliability: 0), took 52.81 minutes 2025/12/31 06:00:10 kernel BUG in jfs_evict_inode: repro is too unreliable, skipping 2025/12/31 06:00:10 "kernel BUG in jfs_evict_inode": saved crash log into 1767160810.crash.log 2025/12/31 06:00:10 "kernel BUG in jfs_evict_inode": saved repro log into 1767160810.repro.log 2025/12/31 06:00:24 runner 3 connected 2025/12/31 06:00:31 runner 0 connected 2025/12/31 06:00:51 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:00:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:01:05 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 06:01:16 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:01:22 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:01:49 runner 1 connected 2025/12/31 06:01:52 runner 0 connected 2025/12/31 06:02:03 runner 2 connected 2025/12/31 06:02:06 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 06:02:14 runner 3 connected 2025/12/31 06:02:16 crash "KASAN: use-after-free Read in hpfs_get_ea" is already known 2025/12/31 06:02:16 base crash "KASAN: use-after-free Read in hpfs_get_ea" is to be ignored 2025/12/31 06:02:16 patched crashed: KASAN: use-after-free Read in hpfs_get_ea [need repro = false] 2025/12/31 06:02:20 runner 0 connected 2025/12/31 06:02:38 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 06:02:58 crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is already known 2025/12/31 06:02:58 base crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is to be ignored 2025/12/31 06:02:58 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = false] 2025/12/31 06:03:02 runner 6 connected 2025/12/31 06:03:08 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 06:03:11 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:03:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 786, "corpus": 9957, "corpus [files]": 3732, "corpus [symbols]": 3935, "cover overflows": 31186, "coverage": 110812, "distributor delayed": 17206, "distributor undelayed": 17206, "distributor violated": 260, "exec candidate": 13577, "exec collide": 8109, "exec fuzz": 14958, "exec gen": 756, "exec hints": 2989, "exec inject": 0, "exec minimize": 25013, "exec retries": 0, "exec seeds": 4969, "exec smash": 15664, "exec total [base]": 55839, "exec total [new]": 133499, "exec triage": 31138, "executor restarts [base]": 1202, "executor restarts [new]": 2685, "fault jobs": 0, "fuzzer jobs": 1878, "fuzzing VMs [base]": 0, "fuzzing VMs [new]": 5, "hints jobs": 449, "max signal": 114958, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 18386, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10888, "no exec duration": 47019000000, "no exec 
requests": 214, "pending": 0, "prog exec time": 513, "reproducing": 1, "rpc recv": 18015779020, "rpc sent": 6460795992, "signal": 109404, "smash jobs": 1421, "triage jobs": 8, "vm output": 109143499, "vm restarts [base]": 112, "vm restarts [new]": 250 } 2025/12/31 06:03:13 runner 0 connected 2025/12/31 06:03:35 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:03:37 runner 2 connected 2025/12/31 06:03:49 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:03:54 runner 8 connected 2025/12/31 06:04:03 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:04:05 runner 1 connected 2025/12/31 06:04:10 runner 0 connected 2025/12/31 06:04:31 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:04:33 runner 1 connected 2025/12/31 06:04:33 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:04:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:04:49 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:04:52 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:04:55 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:05:01 runner 0 connected 2025/12/31 06:05:14 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:05:22 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:05:29 runner 7 connected 2025/12/31 06:05:29 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:05:31 runner 6 connected 2025/12/31 06:05:42 runner 3 connected 2025/12/31 06:05:43 runner 2 connected 2025/12/31 06:05:46 runner 2 connected 2025/12/31 06:05:50 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:05:51 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:05:52 runner 1 connected 2025/12/31 06:06:03 runner 8 connected 2025/12/31 06:06:03 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:06:15 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:06:16 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:06:20 runner 0 connected 2025/12/31 06:06:22 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:06:23 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:06:23 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:06:26 runner 1 connected 2025/12/31 06:06:42 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:06:48 runner 7 connected 2025/12/31 06:06:48 runner 6 connected 2025/12/31 06:06:52 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:06:54 runner 3 connected 2025/12/31 06:07:09 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:13 runner 2 connected 2025/12/31 06:07:13 runner 2 connected 2025/12/31 06:07:14 runner 0 connected 2025/12/31 06:07:14 runner 1 connected 2025/12/31 06:07:19 runner 8 connected 
2025/12/31 06:07:21 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:22 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:32 runner 0 connected 2025/12/31 06:07:34 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:38 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:41 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:07:57 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:08:06 runner 6 connected 2025/12/31 06:08:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 793, "corpus": 9978, "corpus [files]": 3733, "corpus [symbols]": 3937, "cover overflows": 31716, "coverage": 110948, "distributor delayed": 17244, "distributor undelayed": 17243, "distributor violated": 260, "exec candidate": 13577, "exec collide": 8307, "exec fuzz": 15348, "exec gen": 773, "exec hints": 3056, "exec inject": 0, "exec minimize": 25327, "exec retries": 0, "exec seeds": 5035, "exec smash": 16133, "exec total [base]": 57618, "exec total [new]": 135081, "exec triage": 31198, "executor restarts [base]": 1271, "executor restarts [new]": 2793, "fault jobs": 0, "fuzzer jobs": 1849, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 0, "hints jobs": 440, "max signal": 115002, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 18658, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10915, "no exec duration": 47019000000, "no exec requests": 214, "pending": 0, "prog exec time": 0, "reproducing": 1, "rpc recv": 18951286524, "rpc sent": 6648806992, "signal": 109529, "smash jobs": 1407, "triage jobs": 2, "vm output": 111163200, "vm restarts [base]": 119, "vm restarts [new]": 269 } 2025/12/31 06:08:20 runner 3 connected 2025/12/31 06:08:20 runner 7 connected 2025/12/31 06:08:28 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:08:32 runner 2 connected 2025/12/31 06:08:34 runner 1 connected 2025/12/31 06:08:37 runner 8 connected 2025/12/31 06:08:40 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:08:42 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:08:48 runner 0 connected 2025/12/31 06:08:54 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:09:06 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:09:26 crash "possible deadlock in ocfs2_setattr" is already known 2025/12/31 06:09:26 base crash "possible deadlock in ocfs2_setattr" is to be ignored 2025/12/31 06:09:26 patched crashed: possible deadlock in ocfs2_setattr [need repro = false] 2025/12/31 06:09:26 runner 6 connected 2025/12/31 06:09:36 runner 3 connected 2025/12/31 06:09:39 runner 1 connected 2025/12/31 06:09:39 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 06:09:39 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 06:09:39 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 06:09:51 runner 2 connected 2025/12/31 06:09:57 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 
2025/12/31 06:10:04 runner 0 connected 2025/12/31 06:10:17 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 06:10:22 runner 1 connected 2025/12/31 06:10:32 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:10:34 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:10:36 runner 0 connected 2025/12/31 06:10:37 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:10:40 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:10:55 crash "possible deadlock in ocfs2_xattr_set" is already known 2025/12/31 06:10:55 base crash "possible deadlock in ocfs2_xattr_set" is to be ignored 2025/12/31 06:10:55 patched crashed: possible deadlock in ocfs2_xattr_set [need repro = false] 2025/12/31 06:11:14 runner 7 connected 2025/12/31 06:11:28 runner 0 connected 2025/12/31 06:11:32 runner 1 connected 2025/12/31 06:11:34 runner 3 connected 2025/12/31 06:11:37 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 06:11:37 runner 2 connected 2025/12/31 06:11:39 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:11:49 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 06:11:53 runner 2 connected 2025/12/31 06:12:03 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:12:15 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:12:32 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:12:33 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:12:35 runner 8 connected 2025/12/31 06:12:36 runner 0 connected 2025/12/31 06:12:46 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:12:46 runner 6 connected 2025/12/31 06:13:00 runner 3 connected 2025/12/31 06:13:02 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:13:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 825, "corpus": 10017, "corpus [files]": 3737, "corpus [symbols]": 3941, "cover overflows": 32984, "coverage": 111175, "distributor delayed": 17347, "distributor undelayed": 17347, "distributor violated": 260, "exec candidate": 13577, "exec collide": 8825, "exec fuzz": 16273, "exec gen": 816, "exec hints": 3262, "exec inject": 0, "exec minimize": 25948, "exec retries": 0, "exec seeds": 5192, "exec smash": 17259, "exec total [base]": 59268, "exec total [new]": 138840, "exec triage": 31345, "executor restarts [base]": 1317, "executor restarts [new]": 2912, "fault jobs": 0, "fuzzer jobs": 1844, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 425, "max signal": 115214, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 19171, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10985, "no exec duration": 49208000000, "no exec requests": 220, "pending": 0, "prog exec time": 597, "reproducing": 1, "rpc recv": 19875945976, "rpc sent": 6918355392, "signal": 109612, "smash jobs": 1410, "triage jobs": 9, "vm output": 115107035, "vm restarts [base]": 124, "vm restarts [new]": 287 } 2025/12/31 06:13:12 runner 1 connected 2025/12/31 06:13:29 runner 7 connected 2025/12/31 
06:13:32 runner 2 connected 2025/12/31 06:13:42 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:13:44 runner 2 connected 2025/12/31 06:13:58 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:14:02 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 06:14:04 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:14:10 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:14:15 crash "possible deadlock in ocfs2_init_acl" is already known 2025/12/31 06:14:15 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/12/31 06:14:15 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 06:14:22 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:14:29 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:14:41 runner 1 connected 2025/12/31 06:14:56 runner 1 connected 2025/12/31 06:15:00 runner 7 connected 2025/12/31 06:15:02 runner 6 connected 2025/12/31 06:15:04 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:15:07 runner 8 connected 2025/12/31 06:15:11 base crash: possible deadlock in ocfs2_init_acl 2025/12/31 06:15:12 runner 2 connected 2025/12/31 06:15:19 runner 3 connected 2025/12/31 06:15:28 runner 0 connected 2025/12/31 06:15:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:15:58 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:16:01 runner 2 connected 2025/12/31 06:16:09 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:16:09 runner 1 connected 2025/12/31 06:16:13 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:16:24 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:16:47 base crash: kernel BUG in txUnlock 2025/12/31 06:16:53 runner 3 connected 2025/12/31 06:16:57 runner 2 connected 2025/12/31 06:16:59 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:17:10 runner 1 connected 2025/12/31 06:17:23 runner 7 connected 2025/12/31 06:17:44 runner 0 connected 2025/12/31 06:17:53 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:17:57 runner 2 connected 2025/12/31 06:18:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 856, "corpus": 10083, "corpus [files]": 3745, "corpus [symbols]": 3951, "cover overflows": 34882, "coverage": 111384, "distributor delayed": 17468, "distributor undelayed": 17468, "distributor violated": 260, "exec candidate": 13577, "exec collide": 9426, "exec fuzz": 17401, "exec gen": 863, "exec hints": 3506, "exec inject": 0, "exec minimize": 27202, "exec retries": 0, "exec seeds": 5363, "exec smash": 18625, "exec total [base]": 60894, "exec total [new]": 143889, "exec triage": 31573, "executor restarts [base]": 1367, "executor restarts [new]": 3019, "fault jobs": 0, "fuzzer jobs": 1871, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 427, "max signal": 115718, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 20018, "minimize: 
filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11087, "no exec duration": 50070000000, "no exec requests": 223, "pending": 0, "prog exec time": 627, "reproducing": 1, "rpc recv": 20694909844, "rpc sent": 7232345240, "signal": 109927, "smash jobs": 1430, "triage jobs": 14, "vm output": 120383841, "vm restarts [base]": 132, "vm restarts [new]": 299 } 2025/12/31 06:18:50 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:18:56 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:18:56 runner 3 connected 2025/12/31 06:19:14 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:19:16 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:19:19 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:19:49 runner 1 connected 2025/12/31 06:19:53 runner 0 connected 2025/12/31 06:20:14 runner 7 connected 2025/12/31 06:20:16 runner 1 connected 2025/12/31 06:20:22 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:20:38 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:20:49 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:20:50 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:21:00 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:21:19 runner 1 connected 2025/12/31 06:21:34 runner 2 connected 2025/12/31 06:21:37 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:21:47 runner 2 connected 2025/12/31 06:21:48 runner 6 connected 2025/12/31 06:21:58 runner 0 connected 2025/12/31 06:22:18 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:22:31 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:22:34 runner 8 connected 2025/12/31 06:22:34 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:22:48 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:23:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 885, "corpus": 10132, "corpus [files]": 3749, "corpus [symbols]": 3955, "cover overflows": 36770, "coverage": 111591, "distributor delayed": 17579, "distributor undelayed": 17579, "distributor violated": 260, "exec candidate": 13577, "exec collide": 10098, "exec fuzz": 18654, "exec gen": 924, "exec hints": 3769, "exec inject": 0, "exec minimize": 28141, "exec retries": 0, "exec seeds": 5546, "exec smash": 20157, "exec total [base]": 63302, "exec total [new]": 148995, "exec triage": 31769, "executor restarts [base]": 1430, "executor restarts [new]": 3158, "fault jobs": 0, "fuzzer jobs": 1844, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 5, "hints jobs": 413, "max signal": 115998, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 20723, "minimize: filename": 0, "minimize: 
integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11172, "no exec duration": 51393000000, "no exec requests": 227, "pending": 0, "prog exec time": 751, "reproducing": 1, "rpc recv": 21284001464, "rpc sent": 7586586592, "signal": 110085, "smash jobs": 1421, "triage jobs": 10, "vm output": 125672754, "vm restarts [base]": 135, "vm restarts [new]": 307 } 2025/12/31 06:23:24 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:23:31 runner 7 connected 2025/12/31 06:23:36 runner 3 connected 2025/12/31 06:23:45 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:23:45 runner 1 connected 2025/12/31 06:23:54 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:24:21 runner 2 connected 2025/12/31 06:24:27 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:24:43 runner 1 connected 2025/12/31 06:24:50 runner 2 connected 2025/12/31 06:25:07 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:25:09 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:25:28 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:25:29 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/12/31 06:25:29 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored 2025/12/31 06:25:29 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/12/31 06:25:30 runner 3 connected 2025/12/31 06:25:32 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:25:44 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:25:50 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 06:26:05 runner 0 connected 2025/12/31 06:26:08 runner 0 connected 2025/12/31 06:26:26 runner 7 connected 2025/12/31 06:26:26 runner 2 connected 2025/12/31 06:26:31 runner 1 connected 2025/12/31 06:26:46 runner 8 connected 2025/12/31 06:26:54 base crash: possible deadlock in ocfs2_evict_inode 2025/12/31 06:26:55 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:26:58 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:27:28 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 06:27:52 runner 1 connected 2025/12/31 06:27:53 runner 3 connected 2025/12/31 06:27:55 runner 0 connected 2025/12/31 06:28:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 920, "corpus": 10178, "corpus [files]": 3754, "corpus [symbols]": 3961, "cover overflows": 39186, "coverage": 112037, "distributor delayed": 17697, "distributor undelayed": 17697, "distributor violated": 260, "exec candidate": 13577, "exec collide": 10779, "exec fuzz": 19987, "exec gen": 998, "exec hints": 4048, "exec inject": 0, "exec minimize": 29159, "exec retries": 0, "exec seeds": 5732, "exec smash": 21776, "exec total [base]": 65225, "exec total [new]": 154392, "exec triage": 31966, "executor restarts [base]": 1491, "executor restarts [new]": 3291, "fault 
jobs": 0, "fuzzer jobs": 1819, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 390, "max signal": 116401, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 21543, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11258, "no exec duration": 51978000000, "no exec requests": 229, "pending": 0, "prog exec time": 586, "reproducing": 1, "rpc recv": 21995660832, "rpc sent": 7943086576, "signal": 110438, "smash jobs": 1420, "triage jobs": 9, "vm output": 129988865, "vm restarts [base]": 140, "vm restarts [new]": 318 } 2025/12/31 06:28:26 runner 6 connected 2025/12/31 06:28:30 crash "KASAN: slab-use-after-free Write in txEnd" is already known 2025/12/31 06:28:30 base crash "KASAN: slab-use-after-free Write in txEnd" is to be ignored 2025/12/31 06:28:30 patched crashed: KASAN: slab-use-after-free Write in txEnd [need repro = false] 2025/12/31 06:28:40 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:28:45 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:28:48 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 06:29:22 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:29:29 runner 8 connected 2025/12/31 06:29:36 runner 1 connected 2025/12/31 06:29:44 runner 1 connected 2025/12/31 06:29:45 runner 7 connected 2025/12/31 06:29:51 patched crashed: general protection fault in txEnd [need repro = false] 2025/12/31 06:30:18 runner 2 connected 2025/12/31 06:30:19 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:30:24 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:30:25 crash "possible deadlock in attr_data_get_block" is already known 2025/12/31 06:30:25 base crash "possible deadlock in attr_data_get_block" is to be ignored 2025/12/31 06:30:25 patched crashed: possible deadlock in attr_data_get_block [need repro = false] 2025/12/31 06:30:49 runner 6 connected 2025/12/31 06:31:06 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:31:12 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 06:31:17 runner 8 connected 2025/12/31 06:31:21 runner 2 connected 2025/12/31 06:31:22 runner 1 connected 2025/12/31 06:31:52 crash "possible deadlock in ocfs2_setattr" is already known 2025/12/31 06:31:52 base crash "possible deadlock in ocfs2_setattr" is to be ignored 2025/12/31 06:31:52 patched crashed: possible deadlock in ocfs2_setattr [need repro = false] 2025/12/31 06:31:53 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:32:04 runner 7 connected 2025/12/31 06:32:08 runner 0 connected 2025/12/31 06:32:49 runner 2 connected 2025/12/31 06:32:51 base crash: kernel BUG in txUnlock 2025/12/31 06:32:51 runner 3 connected 2025/12/31 06:33:03 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:33:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 953, "corpus": 10219, "corpus [files]": 3759, "corpus [symbols]": 3968, "cover overflows": 40975, "coverage": 112228, "distributor delayed": 17790, "distributor undelayed": 17790, "distributor violated": 260, "exec candidate": 13577, "exec collide": 11385, "exec fuzz": 21139, "exec gen": 1041, "exec hints": 4291, "exec inject": 0, "exec 
minimize": 29973, "exec retries": 0, "exec seeds": 5877, "exec smash": 23192, "exec total [base]": 67347, "exec total [new]": 158967, "exec triage": 32122, "executor restarts [base]": 1556, "executor restarts [new]": 3417, "fault jobs": 0, "fuzzer jobs": 1792, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 6, "hints jobs": 371, "max signal": 116556, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 22208, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11333, "no exec duration": 57460000000, "no exec requests": 238, "pending": 0, "prog exec time": 664, "reproducing": 1, "rpc recv": 22657336440, "rpc sent": 8277314992, "signal": 110529, "smash jobs": 1409, "triage jobs": 12, "vm output": 134936312, "vm restarts [base]": 143, "vm restarts [new]": 329 } 2025/12/31 06:33:33 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 06:33:48 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:33:49 runner 1 connected 2025/12/31 06:34:01 runner 1 connected 2025/12/31 06:34:05 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/12/31 06:34:05 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored 2025/12/31 06:34:05 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/12/31 06:34:14 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:34:15 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:34:30 runner 8 connected 2025/12/31 06:34:45 runner 3 connected 2025/12/31 06:35:03 runner 2 connected 2025/12/31 06:35:04 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:35:16 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:35:19 runner 1 connected 2025/12/31 06:35:25 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:35:28 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 06:35:40 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan [need repro = false] 2025/12/31 06:35:49 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:36:01 runner 0 connected 2025/12/31 06:36:15 runner 1 connected 2025/12/31 06:36:23 runner 3 connected 2025/12/31 06:36:25 runner 7 connected 2025/12/31 06:36:38 runner 2 connected 2025/12/31 06:36:39 base crash: INFO: task hung in __iterate_supers 2025/12/31 06:36:41 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 06:36:47 runner 0 connected 2025/12/31 06:36:55 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 06:37:00 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:37:27 base crash: kernel BUG in jfs_evict_inode 2025/12/31 06:37:28 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:37:36 runner 2 connected 2025/12/31 06:37:36 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:37:37 runner 0 connected 2025/12/31 06:37:52 runner 6 connected 2025/12/31 06:37:57 runner 8 connected 2025/12/31 
06:38:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 989, "corpus": 10254, "corpus [files]": 3763, "corpus [symbols]": 3972, "cover overflows": 42695, "coverage": 112351, "distributor delayed": 17882, "distributor undelayed": 17881, "distributor violated": 260, "exec candidate": 13577, "exec collide": 11944, "exec fuzz": 22300, "exec gen": 1102, "exec hints": 4536, "exec inject": 0, "exec minimize": 30607, "exec retries": 0, "exec seeds": 6004, "exec smash": 24605, "exec total [base]": 69206, "exec total [new]": 163303, "exec triage": 32254, "executor restarts [base]": 1603, "executor restarts [new]": 3540, "fault jobs": 0, "fuzzer jobs": 1754, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 359, "max signal": 116698, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 22751, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11392, "no exec duration": 58619000000, "no exec requests": 243, "pending": 0, "prog exec time": 653, "reproducing": 1, "rpc recv": 23356830196, "rpc sent": 8591339512, "signal": 110654, "smash jobs": 1384, "triage jobs": 11, "vm output": 139717966, "vm restarts [base]": 147, "vm restarts [new]": 341 } 2025/12/31 06:38:20 patched crashed: WARNING in hfs_bnode_create [need repro = false] 2025/12/31 06:38:25 runner 7 connected 2025/12/31 06:38:25 runner 1 connected 2025/12/31 06:38:33 runner 0 connected 2025/12/31 06:38:57 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 06:39:02 base crash: possible deadlock in ntfs_fiemap 2025/12/31 06:39:16 runner 1 connected 2025/12/31 06:39:18 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:39:20 crash "possible deadlock in ocfs2_setattr" is already known 2025/12/31 06:39:20 base crash "possible deadlock in ocfs2_setattr" is to be ignored 2025/12/31 06:39:20 patched crashed: possible deadlock in ocfs2_setattr [need repro = false] 2025/12/31 06:39:42 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:39:55 runner 8 connected 2025/12/31 06:39:59 runner 2 connected 2025/12/31 06:40:02 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:40:16 runner 0 connected 2025/12/31 06:40:19 runner 7 connected 2025/12/31 06:40:24 base crash: possible deadlock in ocfs2_evict_inode 2025/12/31 06:40:38 runner 3 connected 2025/12/31 06:40:42 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:40:56 patched crashed: UBSAN: array-index-out-of-bounds in dtSplitPage [need repro = false] 2025/12/31 06:40:58 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:41:01 runner 1 connected 2025/12/31 06:41:20 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:41:23 runner 0 connected 2025/12/31 06:41:36 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:41:41 runner 2 connected 2025/12/31 06:41:45 base crash: possible deadlock in ocfs2_xattr_set 2025/12/31 06:41:52 runner 0 connected 2025/12/31 06:41:57 runner 8 connected 2025/12/31 06:42:18 runner 7 connected 2025/12/31 06:42:19 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:42:29 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:42:33 runner 1 connected 2025/12/31 
06:42:36 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 06:42:43 runner 2 connected 2025/12/31 06:42:50 crash "possible deadlock in ocfs2_setattr" is already known 2025/12/31 06:42:50 base crash "possible deadlock in ocfs2_setattr" is to be ignored 2025/12/31 06:42:50 patched crashed: possible deadlock in ocfs2_setattr [need repro = false] 2025/12/31 06:42:55 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:43:05 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:43:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1016, "corpus": 10285, "corpus [files]": 3769, "corpus [symbols]": 3979, "cover overflows": 43990, "coverage": 112467, "distributor delayed": 17944, "distributor undelayed": 17941, "distributor violated": 260, "exec candidate": 13577, "exec collide": 12497, "exec fuzz": 23315, "exec gen": 1155, "exec hints": 4758, "exec inject": 0, "exec minimize": 31181, "exec retries": 0, "exec seeds": 6119, "exec smash": 25884, "exec total [base]": 70909, "exec total [new]": 167217, "exec triage": 32360, "executor restarts [base]": 1667, "executor restarts [new]": 3689, "fault jobs": 0, "fuzzer jobs": 1693, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 348, "max signal": 116860, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 23275, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11442, "no exec duration": 59265000000, "no exec requests": 246, "pending": 0, "prog exec time": 837, "reproducing": 1, "rpc recv": 24071393192, "rpc sent": 8874142016, "signal": 110765, "smash jobs": 1339, "triage jobs": 6, "vm output": 144339832, "vm restarts [base]": 154, "vm restarts [new]": 351 } 2025/12/31 06:43:16 runner 0 connected 2025/12/31 06:43:18 patched crashed: possible deadlock in ocfs2_xattr_set [need repro = false] 2025/12/31 06:43:28 runner 2 connected 2025/12/31 06:43:32 base crash: kernel BUG in jfs_evict_inode 2025/12/31 06:43:35 runner 3 connected 2025/12/31 06:43:47 runner 8 connected 2025/12/31 06:43:49 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:43:51 runner 7 connected 2025/12/31 06:43:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:44:16 runner 6 connected 2025/12/31 06:44:30 runner 2 connected 2025/12/31 06:44:33 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 06:44:40 patched crashed: possible deadlock in hfs_extend_file [need repro = false] 2025/12/31 06:44:46 runner 0 connected 2025/12/31 06:44:53 runner 2 connected 2025/12/31 06:45:16 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 06:45:29 runner 1 connected 2025/12/31 06:45:33 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:45:34 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:45:38 runner 7 connected 2025/12/31 06:45:41 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:46:14 runner 6 connected 2025/12/31 06:46:23 patched crashed: possible deadlock in ocfs2_del_inode_from_orphan 
[need repro = false] 2025/12/31 06:46:30 runner 0 connected 2025/12/31 06:46:30 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:46:32 runner 3 connected 2025/12/31 06:46:40 runner 1 connected 2025/12/31 06:46:42 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:46:47 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/12/31 06:46:47 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored 2025/12/31 06:46:47 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/12/31 06:46:49 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:46:59 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:47:21 runner 1 connected 2025/12/31 06:47:27 runner 7 connected 2025/12/31 06:47:37 base crash: possible deadlock in hfs_extend_file 2025/12/31 06:47:38 runner 2 connected 2025/12/31 06:47:40 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:47:45 runner 6 connected 2025/12/31 06:47:56 runner 0 connected 2025/12/31 06:48:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1044, "corpus": 10326, "corpus [files]": 3771, "corpus [symbols]": 3981, "cover overflows": 45367, "coverage": 112608, "distributor delayed": 18051, "distributor undelayed": 18051, "distributor violated": 260, "exec candidate": 13577, "exec collide": 13033, "exec fuzz": 24421, "exec gen": 1206, "exec hints": 4996, "exec inject": 0, "exec minimize": 31823, "exec retries": 0, "exec seeds": 6229, "exec smash": 27229, "exec total [base]": 72872, "exec total [new]": 171406, "exec triage": 32514, "executor restarts [base]": 1722, "executor restarts [new]": 3808, "fault jobs": 0, "fuzzer jobs": 1691, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 351, "max signal": 117022, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 23792, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11510, "no exec duration": 59838000000, "no exec requests": 248, "pending": 0, "prog exec time": 604, "reproducing": 1, "rpc recv": 24900275832, "rpc sent": 9185637440, "signal": 110878, "smash jobs": 1331, "triage jobs": 9, "vm output": 148529054, "vm restarts [base]": 158, "vm restarts [new]": 367 } 2025/12/31 06:48:14 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/12/31 06:48:14 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored 2025/12/31 06:48:14 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/12/31 06:48:16 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:48:31 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 06:48:34 runner 1 connected 2025/12/31 06:48:37 runner 8 connected 2025/12/31 06:48:52 patched crashed: possible deadlock in ocfs2_xattr_set [need repro = false] 2025/12/31 06:49:09 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:49:11 runner 0 connected 2025/12/31 06:49:12 base crash: kernel BUG in 
jfs_evict_inode 2025/12/31 06:49:13 runner 6 connected 2025/12/31 06:49:20 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:49:29 runner 7 connected 2025/12/31 06:49:49 runner 3 connected 2025/12/31 06:50:07 runner 2 connected 2025/12/31 06:50:12 runner 2 connected 2025/12/31 06:50:16 runner 1 connected 2025/12/31 06:50:22 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:50:23 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:50:34 reproducing crash 'INFO: task hung in path_openat': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/inode.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 06:50:34 repro finished 'INFO: task hung in path_openat', repro=true crepro=false desc='INFO: task hung in jfs_commit_inode' hub=false from_dashboard=false 2025/12/31 06:50:34 found repro for "INFO: task hung in jfs_commit_inode" (orig title: "INFO: task hung in path_openat", reliability: 1), took 120.49 minutes 2025/12/31 06:50:34 "INFO: task hung in jfs_commit_inode": saved crash log into 1767163834.crash.log 2025/12/31 06:50:34 "INFO: task hung in jfs_commit_inode": saved repro log into 1767163834.repro.log 2025/12/31 06:50:37 runner 5 connected 2025/12/31 06:50:50 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:50:53 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:51:01 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:51:20 runner 8 connected 2025/12/31 06:51:20 runner 3 connected 2025/12/31 06:51:25 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:51:30 runner 4 connected 2025/12/31 06:51:34 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:51:47 runner 7 connected 2025/12/31 06:51:51 runner 2 connected 2025/12/31 06:51:57 runner 1 connected 2025/12/31 06:51:59 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:52:02 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:52:20 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:52:22 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 06:52:23 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:52:23 runner 6 connected 2025/12/31 06:52:31 runner 0 connected 2025/12/31 06:52:36 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:52:43 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:52:52 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:52:57 runner 5 connected 2025/12/31 06:53:01 runner 1 connected 2025/12/31 06:53:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1063, "corpus": 10359, "corpus [files]": 3774, "corpus [symbols]": 3988, "cover overflows": 46717, "coverage": 112701, "distributor delayed": 18114, "distributor undelayed": 18113, "distributor violated": 260, "exec candidate": 13577, "exec collide": 13588, "exec fuzz": 25412, "exec gen": 1262, "exec hints": 5231, "exec inject": 0, 
"exec minimize": 32394, "exec retries": 0, "exec seeds": 6334, "exec smash": 28495, "exec total [base]": 74950, "exec total [new]": 175296, "exec triage": 32618, "executor restarts [base]": 1775, "executor restarts [new]": 3930, "fault jobs": 0, "fuzzer jobs": 1642, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 342, "max signal": 117165, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 24235, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11558, "no exec duration": 59838000000, "no exec requests": 248, "pending": 0, "prog exec time": 595, "reproducing": 0, "rpc recv": 25735426080, "rpc sent": 9492030272, "signal": 110969, "smash jobs": 1295, "triage jobs": 5, "vm output": 152622244, "vm restarts [base]": 161, "vm restarts [new]": 384 } 2025/12/31 06:53:19 runner 8 connected 2025/12/31 06:53:20 runner 1 connected 2025/12/31 06:53:21 runner 7 connected 2025/12/31 06:53:35 runner 3 connected 2025/12/31 06:53:41 runner 4 connected 2025/12/31 06:53:51 runner 2 connected 2025/12/31 06:53:54 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:54:03 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:54:48 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:54:54 attempt #0 to run "INFO: task hung in jfs_commit_inode" on base: crashed with INFO: task hung in jfs_commit_inode 2025/12/31 06:54:54 crash "INFO: task hung in jfs_commit_inode" is already known 2025/12/31 06:54:54 base crash "INFO: task hung in jfs_commit_inode" is to be ignored 2025/12/31 06:54:54 crashes both: INFO: task hung in jfs_commit_inode / INFO: task hung in jfs_commit_inode 2025/12/31 06:54:58 runner 7 connected 2025/12/31 06:55:02 patched crashed: general protection fault in txEnd [need repro = false] 2025/12/31 06:55:08 runner 3 connected 2025/12/31 06:55:53 runner 2 connected 2025/12/31 06:56:00 runner 0 connected 2025/12/31 06:56:06 runner 4 connected 2025/12/31 06:56:26 crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is already known 2025/12/31 06:56:26 base crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is to be ignored 2025/12/31 06:56:26 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = false] 2025/12/31 06:56:31 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:56:35 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:56:48 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:56:51 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:56:59 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/12/31 06:56:59 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored 2025/12/31 06:56:59 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/12/31 06:57:03 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:57:07 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:57:14 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 06:57:21 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = 
false] 2025/12/31 06:57:23 runner 1 connected 2025/12/31 06:57:29 runner 5 connected 2025/12/31 06:57:33 runner 1 connected 2025/12/31 06:57:46 runner 3 connected 2025/12/31 06:57:50 runner 6 connected 2025/12/31 06:57:56 runner 4 connected 2025/12/31 06:58:01 runner 2 connected 2025/12/31 06:58:06 runner 7 connected 2025/12/31 06:58:08 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/12/31 06:58:08 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 06:58:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1090, "corpus": 10405, "corpus [files]": 3778, "corpus [symbols]": 3993, "cover overflows": 48758, "coverage": 112872, "distributor delayed": 18215, "distributor undelayed": 18215, "distributor violated": 260, "exec candidate": 13577, "exec collide": 14358, "exec fuzz": 26919, "exec gen": 1347, "exec hints": 5578, "exec inject": 0, "exec minimize": 33130, "exec retries": 0, "exec seeds": 6490, "exec smash": 30351, "exec total [base]": 76571, "exec total [new]": 180955, "exec triage": 32815, "executor restarts [base]": 1821, "executor restarts [new]": 4101, "fault jobs": 0, "fuzzer jobs": 1549, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 5, "hints jobs": 331, "max signal": 117414, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 24887, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11639, "no exec duration": 62754000000, "no exec requests": 255, "pending": 0, "prog exec time": 600, "reproducing": 0, "rpc recv": 26490142856, "rpc sent": 9849907096, "signal": 111236, "smash jobs": 1215, "triage jobs": 3, "vm output": 158203954, "vm restarts [base]": 165, "vm restarts [new]": 399 } 2025/12/31 06:58:13 runner 2 connected 2025/12/31 06:58:18 runner 0 connected 2025/12/31 06:58:46 patched crashed: kernel BUG in ocfs2_set_new_buffer_uptodate [need repro = true] 2025/12/31 06:58:46 scheduled a reproduction of 'kernel BUG in ocfs2_set_new_buffer_uptodate' 2025/12/31 06:58:46 start reproducing 'kernel BUG in ocfs2_set_new_buffer_uptodate' 2025/12/31 06:58:58 crash "INFO: task hung in lock_metapage" is already known 2025/12/31 06:58:58 base crash "INFO: task hung in lock_metapage" is to be ignored 2025/12/31 06:58:58 patched crashed: INFO: task hung in lock_metapage [need repro = false] 2025/12/31 06:59:07 runner 5 connected 2025/12/31 06:59:11 runner 1 connected 2025/12/31 06:59:31 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 06:59:44 base crash: kernel BUG in hfs_write_inode 2025/12/31 06:59:44 runner 7 connected 2025/12/31 06:59:55 runner 8 connected 2025/12/31 07:00:04 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/12/31 07:00:13 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 07:00:29 runner 6 connected 2025/12/31 07:00:31 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/12/31 07:00:36 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = false] 2025/12/31 07:00:42 runner 0 connected 2025/12/31 07:00:47 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:01:02 
runner 2 connected 2025/12/31 07:01:26 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/12/31 07:01:28 runner 7 connected 2025/12/31 07:01:29 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/12/31 07:01:35 runner 8 connected 2025/12/31 07:01:46 runner 4 connected 2025/12/31 07:02:24 runner 6 connected 2025/12/31 07:02:24 base crash: possible deadlock in run_unpack_ex 2025/12/31 07:02:32 runner 5 connected 2025/12/31 07:02:42 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 07:02:49 base crash: KASAN: slab-out-of-bounds Read in dtSplitPage 2025/12/31 07:03:11 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1106, "corpus": 10441, "corpus [files]": 3785, "corpus [symbols]": 4001, "cover overflows": 50573, "coverage": 113386, "distributor delayed": 18315, "distributor undelayed": 18315, "distributor violated": 260, "exec candidate": 13577, "exec collide": 15005, "exec fuzz": 28213, "exec gen": 1405, "exec hints": 5888, "exec inject": 0, "exec minimize": 33828, "exec retries": 0, "exec seeds": 6608, "exec smash": 31922, "exec total [base]": 78967, "exec total [new]": 185836, "exec triage": 32986, "executor restarts [base]": 1879, "executor restarts [new]": 4216, "fault jobs": 0, "fuzzer jobs": 1468, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 5, "hints jobs": 309, "max signal": 118039, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 25399, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11711, "no exec duration": 64655000000, "no exec requests": 260, "pending": 0, "prog exec time": 656, "reproducing": 1, "rpc recv": 27191556248, "rpc sent": 10210626504, "signal": 111736, "smash jobs": 1147, "triage jobs": 12, "vm output": 163600850, "vm restarts [base]": 167, "vm restarts [new]": 411 } 2025/12/31 07:03:15 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 07:03:22 runner 2 connected 2025/12/31 07:03:26 patched crashed: kernel BUG in hfs_write_inode [need repro = false] 2025/12/31 07:03:45 runner 8 connected 2025/12/31 07:03:46 runner 0 connected 2025/12/31 07:04:02 base crash: kernel BUG in ocfs2_set_new_buffer_uptodate 2025/12/31 07:04:07 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:04:09 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 07:04:12 runner 7 connected 2025/12/31 07:04:22 runner 5 connected 2025/12/31 07:04:26 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false] 2025/12/31 07:04:37 base crash: kernel BUG in hfs_write_inode 2025/12/31 07:05:00 runner 1 connected 2025/12/31 07:05:04 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 07:05:06 runner 3 connected 2025/12/31 07:05:14 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:05:20 base crash: kernel BUG in jfs_evict_inode 2025/12/31 07:05:24 runner 4 
connected 2025/12/31 07:05:35 runner 0 connected 2025/12/31 07:05:36 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/12/31 07:05:36 repro finished 'kernel BUG in ocfs2_set_new_buffer_uptodate', repro=true crepro=false desc='kernel BUG in ocfs2_set_new_buffer_uptodate' hub=false from_dashboard=false 2025/12/31 07:05:36 found repro for "kernel BUG in ocfs2_set_new_buffer_uptodate" (orig title: "-SAME-", reliability: 1), took 6.45 minutes 2025/12/31 07:05:36 failed to recv *flatrpc.InfoRequestRawT: EOF 2025/12/31 07:05:36 "kernel BUG in ocfs2_set_new_buffer_uptodate": saved crash log into 1767164736.crash.log 2025/12/31 07:05:36 "kernel BUG in ocfs2_set_new_buffer_uptodate": saved repro log into 1767164736.repro.log 2025/12/31 07:05:52 base crash: kernel BUG in hfs_write_inode 2025/12/31 07:06:02 runner 1 connected 2025/12/31 07:06:06 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:06:08 crash "possible deadlock in ntfs_look_for_free_space" is already known 2025/12/31 07:06:08 base crash "possible deadlock in ntfs_look_for_free_space" is to be ignored 2025/12/31 07:06:08 patched crashed: possible deadlock in ntfs_look_for_free_space [need repro = false] 2025/12/31 07:06:11 runner 2 connected 2025/12/31 07:06:18 runner 2 connected 2025/12/31 07:06:27 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:06:33 runner 0 connected 2025/12/31 07:06:51 runner 1 connected 2025/12/31 07:07:02 runner 7 connected 2025/12/31 07:07:05 runner 4 connected 2025/12/31 07:07:06 attempt #0 to run "kernel BUG in ocfs2_set_new_buffer_uptodate" on base: crashed with kernel BUG in ocfs2_set_new_buffer_uptodate 2025/12/31 07:07:06 crashes both: kernel BUG in ocfs2_set_new_buffer_uptodate / kernel BUG in ocfs2_set_new_buffer_uptodate 2025/12/31 07:07:08 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/12/31 07:07:10 base crash: possible deadlock in ocfs2_xattr_set 2025/12/31 07:07:12 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:07:24 runner 6 connected 2025/12/31 07:07:31 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/12/31 07:07:53 bug reporting terminated 2025/12/31 07:07:53 status reporting terminated 2025/12/31 07:07:53 repro loop terminated 2025/12/31 07:07:53 new: rpc server terminated 2025/12/31 07:07:53 base: rpc server terminated 2025/12/31 07:08:06 base: pool terminated 2025/12/31 07:08:06 base: kernel context loop terminated 2025/12/31 07:08:21 new: pool terminated 2025/12/31 07:08:21 new: kernel context loop terminated 2025/12/31 07:08:21 diff fuzzing terminated 2025/12/31 07:08:21 fuzzing is finished
2025/12/31 07:08:21 status at the end:
Title  On-Base  On-Patched
BUG: spinlock bad magic in corrupted  1 crashes
BUG: spinlock bad magic in release_metapage  [reproduced]
INFO: task hung in __iterate_supers  4 crashes  2 crashes
INFO: task hung in blkdev_write_iter  1 crashes
INFO: task hung in f2fs_balance_fs  1 crashes
INFO: task hung in jfs_commit_inode  1 crashes [reproduced]
INFO: task hung in lock_metapage  1 crashes
INFO: task hung in path_openat  1 crashes
INFO: trying to register non-static key in ocfs2_dlm_shutdown  3 crashes
KASAN: slab-out-of-bounds Read in dtSplitPage  7 crashes  28 crashes
KASAN: slab-use-after-free Write in txEnd  1 crashes
KASAN: use-after-free Read in hpfs_get_ea  1 crashes
UBSAN: array-index-out-of-bounds in dtInsertEntry  1 crashes
UBSAN: array-index-out-of-bounds in dtSplitPage  1 crashes  3 crashes
WARNING in dbAdjTree  2 crashes  5 crashes
WARNING in hfs_bnode_create  2 crashes  6 crashes
WARNING in udf_truncate_extents  1 crashes
general protection fault in lmLogSync  1 crashes
general protection fault in txEnd  1 crashes  4 crashes
kernel BUG in dbFindLeaf  1 crashes  1 crashes
kernel BUG in ext4_do_writepages  1 crashes  1 crashes
kernel BUG in hfs_write_inode  45 crashes  95 crashes
kernel BUG in jfs_evict_inode  26 crashes  52 crashes [reproduced]
kernel BUG in ocfs2_set_new_buffer_uptodate  2 crashes  1 crashes [reproduced]
kernel BUG in txAbort  1 crashes
kernel BUG in txLock  1 crashes
kernel BUG in txUnlock  6 crashes  14 crashes
possible deadlock in attr_data_get_block  2 crashes
possible deadlock in hfs_extend_file  2 crashes  5 crashes
possible deadlock in hfs_find_init  1 crashes
possible deadlock in ntfs_fiemap  1 crashes  1 crashes
possible deadlock in ntfs_look_for_free_space  3 crashes
possible deadlock in ocfs2_del_inode_from_orphan  3 crashes  11 crashes
possible deadlock in ocfs2_evict_inode  5 crashes  12 crashes
possible deadlock in ocfs2_init_acl  1 crashes  9 crashes
possible deadlock in ocfs2_reserve_suballoc_bits  5 crashes
possible deadlock in ocfs2_setattr  4 crashes
possible deadlock in ocfs2_try_remove_refcount_tree  54 crashes  124 crashes
possible deadlock in ocfs2_xattr_set  2 crashes  3 crashes
possible deadlock in run_unpack_ex  4 crashes  7 crashes