2025/08/16 18:57:55 extracted 303751 symbol hashes for base and 303753 for patched
2025/08/16 18:57:55 adding modified_functions to focus areas: ["__bio_release_pages" "__dec_node_state" "__f2fs_commit_super" "__filemap_fdatawait_range" "__filemap_get_folio" "__get_meta_folio" "__get_metapage" "__get_node_folio" "__inc_node_state" "__mem_cgroup_try_charge_swap" "__mem_cgroup_uncharge_swap" "__memcg_kmem_charge_page" "__memcg_kmem_uncharge_page" "__mod_node_page_state" "__node_get_memory_tier" "__pfx_filemap_mod_uncharged_vmstat" "__se_sys_cachestat" "attr_wof_frame_info" "bch2_mark_pagecache_reserved" "bch2_mark_pagecache_unallocated" "bch2_page_mkwrite" "bch2_seek_pagecache_data" "bio_set_pages_dirty" "block_page_mkwrite" "btrfs_cleanup_bg_io" "btrfs_cleanup_one_transaction" "btrfs_cleanup_transaction" "btrfs_clear_buffer_dirty" "btrfs_defrag_file" "btrfs_drop_and_free_fs_root" "btrfs_mark_buffer_dirty" "btrfs_page_mkwrite" "btrfs_read_merkle_tree_page" "btrfs_replay_log" "btrfs_truncate_block" "btrfs_writepage_fixup_worker" "btrfs_writepages" "ceph_page_mkwrite" "ceph_uninline_data" "ceph_writepages_start" "clean_bdev_aliases" "cpu_vm_stats_fold" "delete_from_page_cache_batch" "do_convert_inline_dir" "do_huge_pmd_wp_page" "do_read_cache_folio" "do_swap_page" "do_wp_page" "drain_obj_stock" "ecryptfs_write" "ext4_page_mkwrite" "ext4_write_begin" "f2fs_delete_entry" "f2fs_delete_inline_entry" "f2fs_flush_inline_data" "f2fs_fsync_node_pages" "f2fs_get_lock_data_folio" "f2fs_init_inode_metadata" "f2fs_invalidate_compress_pages" "f2fs_quota_read" "f2fs_recover_fsync_data" "f2fs_set_link" "f2fs_sync_meta_pages" "f2fs_sync_node_pages" "f2fs_truncate_inode_blocks" "f2fs_vm_page_mkwrite" "f2fs_write_begin" "f2fs_write_data_pages" "f2fs_write_multi_pages" "fb_deferred_io_mkwrite" "filemap_add_folio" "filemap_fault" "filemap_get_entry" "filemap_get_folios_contig" "filemap_get_folios_tag" "filemap_get_read_batch" "filemap_mod_uncharged_vmstat" "filemap_page_mkwrite" "filemap_read" "filemap_splice_read" "filemap_unaccount_folio" "find_get_block_common" "find_get_entries" "find_lock_entries" "folio_lock" "folio_mark_dirty_lock" "force_metapage" "free_percpu" "fuse_page_mkwrite" "generic_perform_write" "generic_pipe_buf_try_steal" "gfs2_page_mkwrite" "gfs2_trans_add_meta" "gfs2_write_cache_jdata" "grab_metapage" "hold_metapage" "invalidate_inode_pages2_range" "io_ctl_prepare_pages" "iomap_page_mkwrite" "ksm_do_scan" "ksm_get_folio" "lock_delalloc_folios" "lock_metapage" "lock_page" "lruvec_page_state" "lruvec_page_state_local" "make_device_exclusive" "mapping_seek_hole_data" "mem_cgroup_charge_skmem" "mem_cgroup_uncharge_skmem" "memcg1_stat_format" "memcg1_swapout" "memcg_page_state" "memcg_page_state_local" "memcg_page_state_local_output" "memcg_page_state_output" "memory_numa_stat_show" "memory_stat_format" "memtier_hotplug_callback" "migrate_device_coherent_folio" "migrate_pages" "migrate_pages_batch" "minix_add_link" "minix_delete_entry" "minix_set_link" "mod_memcg_lruvec_state" "mod_memcg_page_state" "mod_memcg_state" "mod_node_state" "move_pages_huge_pmd" "move_pages_pte" "mpage_prepare_extent_to_map" "need_update" "netfs_page_mkwrite" "next_uptodate_folio" "nfs_vm_page_mkwrite" "nilfs_add_link" "nilfs_begin_folio_io" "nilfs_btnode_delete" "nilfs_btnode_prepare_change_key" "nilfs_clear_dirty_pages" "nilfs_copy_back_pages" "nilfs_copy_dirty_pages" "nilfs_delete_entry" "nilfs_end_folio_io" "nilfs_find_uncommitted_extent" "nilfs_lookup_dirty_data_buffers" "nilfs_page_mkwrite" "nilfs_segctor_do_construct" "nilfs_set_link" "node_get_allowed_targets" "node_is_toptier" "node_page_state" "node_page_state_add" "node_page_state_pages" "node_read_vmstat" "ntfs_bio_fill_1" "nvmet_execute_disc_identify" "obj_cgroup_charge_pages" "obj_cgroup_charge_zswap" "obj_cgroup_may_zswap" "obj_cgroup_release" "obj_cgroup_uncharge_zswap" "ocfs2_duplicate_clusters_by_page" "ocfs2_grab_folios_for_write" "orangefs_page_mkwrite" "page_cache_pipe_buf_confirm" "page_cache_pipe_buf_try_steal" "pagetypeinfo_showfree_print" "pcpu_memcg_post_alloc_hook" "prepare_one_folio" "putback_movable_ops_page" "refill_obj_stock" "refresh_cpu_vm_stats" "release_metapage" "relocate_file_extent_cluster" "remove_device_exclusive_entry" "remove_inode_hugepages" "send_extent_data" "set_extent_buffer_dirty" "set_node_memory_tier" "shmem_get_folio_gfp" "shmem_get_partial_folio" "shmem_swapin_folio" "shmem_undo_range" "shrink_folio_list" "split_huge_pages_all" "truncate_inode_pages_range" "try_split_folio" "try_to_unuse" "ttm_backup_backup_page" "txLock" "ubifs_vm_page_mkwrite" "udf_expand_file_adinicb" "udf_page_mkwrite" "ufs_add_link" "ufs_delete_entry" "ufs_get_locked_folio" "ufs_set_link" "uncharge_batch" "unpin_user_page_range_dirty_lock" "unpin_user_pages_dirty_lock" "vfs_dedupe_file_range_compare" "vmstat_next" "vmstat_refresh" "vmstat_shepherd" "vmstat_show" "vmstat_start" "write_all_supers" "write_one_eb" "writeback_iter" "z_erofs_runqueue" "zone_device_page_init" "zoneinfo_show_print" "zswap_current_read" "zswap_shrinker_count"]
2025/08/16 18:57:55 adding directly modified files to focus areas: ["fs/btrfs/disk-io.c" "include/linux/mmzone.h" "include/linux/pagemap.h" "mm/filemap.c" "mm/vmstat.c"]
2025/08/16 18:57:56 downloaded the corpus from https://storage.googleapis.com/syzkaller/corpus/ci-upstream-kasan-gce-root-corpus.db
2025/08/16 18:58:53 runner 1 connected
2025/08/16 18:58:53 runner 9 connected
2025/08/16 18:58:53 runner 8 connected
2025/08/16 18:58:53 runner 0 connected
2025/08/16 18:58:53 runner 5 connected
2025/08/16 18:58:53 runner 3 connected
2025/08/16 18:58:53 runner 2 connected
2025/08/16 18:58:53 runner 0 connected
2025/08/16 18:58:53 runner 4 connected
2025/08/16 18:58:54 runner 1 connected
2025/08/16 18:58:54 runner 2 connected
2025/08/16 18:58:54 runner 6 connected
2025/08/16 18:59:00 initializing coverage information...
2025/08/16 18:59:00 executor cover filter: 0 PCs 2025/08/16 18:59:03 machine check: disabled the following syscalls: fsetxattr$security_selinux : selinux is not enabled fsetxattr$security_smack_transmute : smack is not enabled fsetxattr$smack_xattr_label : smack is not enabled get_thread_area : syscall get_thread_area is not present lookup_dcookie : syscall lookup_dcookie is not present lsetxattr$security_selinux : selinux is not enabled lsetxattr$security_smack_transmute : smack is not enabled lsetxattr$smack_xattr_label : smack is not enabled mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled 
openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or 
directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled set_thread_area : syscall set_thread_area is not present setxattr$security_selinux : selinux is not enabled setxattr$security_smack_transmute : smack is not enabled setxattr$smack_xattr_label : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported syz_kvm_setup_cpu$ppc64 : unsupported arch syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device uselib : syscall uselib is not present write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled transitively disabled the following syscalls (missing resource [creating syscalls]): bind$vsock_dgram : 
sock_vsock_dgram [socket$vsock_dgram] close$ibv_device : fd_rdma [openat$uverbs0] connect$hf : sock_hf [socket$hf] connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem] ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem] ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20] ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20] ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20] ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20] ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20] ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20] ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915] 
ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue 
[openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue] ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETPRM : 
fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy] ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali] 
ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali] ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_ASSIGN_SET_MSIX_NR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM] 
ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm] ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm] ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm] ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR_vm : 
fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm] ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] 
ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_GUEST_DEBUG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_CANCEL : 
fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm] ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_XEN_HVM_CONFIG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0] ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw] ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device] ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb] ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure] ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km] ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage] ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio] ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio] ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm] ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] pkey_free : pkey [pkey_alloc] pkey_mprotect : pkey [pkey_alloc] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] recvmsg$hf : sock_hf [socket$hf] sendmsg$hf : sock_hf [socket$hf] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86] syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU] syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] 
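(Annotation, not part of the log.) The mmap$KVM_VCPU entry above encodes one more link in the same chain: KVM_GET_VCPU_MMAP_SIZE, issued on the top-level /dev/kvm fd, yields vcpu_mmap_size, which is then used to mmap the vCPU fd; the resulting mapping is the kvm_run_ptr resource that syz_memcpy_off$KVM_EXIT_MMIO and syz_kvm_assert_syzos_uexit$x86 operate on. A hedged sketch of that step with the stock KVM API (again illustrative, not syzkaller code):

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        int size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);   /* vcpu_mmap_size */
        if (kvm < 0 || vm < 0 || vcpu < 0 || size <= 0) { perror("kvm setup"); return 1; }

        /* Mapping the vCPU fd exposes the shared struct kvm_run (kvm_run_ptr). */
        struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);
        if (run == MAP_FAILED) { perror("mmap vcpu"); return 1; }

        /* After KVM_RUN, exit_reason and e.g. the KVM_EXIT_MMIO payload are read here. */
        printf("kvm_run area: %d bytes, exit_reason=%u\n", size, run->exit_reason);

        munmap(run, size);
        close(vcpu); close(vm); close(kvm);
        return 0;
    }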
write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. 
SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 3832/8048 2025/08/16 18:59:03 base: machine check complete 2025/08/16 18:59:04 discovered 7699 source files, 338622 symbols 2025/08/16 18:59:05 coverage filter: __bio_release_pages: [__bio_release_pages] 2025/08/16 18:59:05 coverage filter: __dec_node_state: [] 2025/08/16 18:59:05 coverage filter: __f2fs_commit_super: [__f2fs_commit_super] 2025/08/16 18:59:05 coverage filter: __filemap_fdatawait_range: [__filemap_fdatawait_range] 2025/08/16 18:59:05 coverage filter: __filemap_get_folio: [__filemap_get_folio] 2025/08/16 18:59:05 coverage filter: __get_meta_folio: [__get_meta_folio] 2025/08/16 18:59:05 coverage filter: __get_metapage: [__get_metapage] 2025/08/16 18:59:05 coverage filter: __get_node_folio: [__get_node_folio] 2025/08/16 18:59:05 coverage filter: __inc_node_state: [] 2025/08/16 18:59:05 coverage filter: __mem_cgroup_try_charge_swap: [] 2025/08/16 18:59:05 coverage filter: __mem_cgroup_uncharge_swap: [] 2025/08/16 18:59:05 coverage filter: __memcg_kmem_charge_page: [] 2025/08/16 18:59:05 coverage filter: __memcg_kmem_uncharge_page: [] 2025/08/16 18:59:05 coverage filter: __mod_node_page_state: [] 2025/08/16 18:59:05 coverage filter: __node_get_memory_tier: [__node_get_memory_tier] 2025/08/16 18:59:05 coverage filter: __pfx_filemap_mod_uncharged_vmstat: [] 2025/08/16 18:59:05 coverage filter: __se_sys_cachestat: [__se_sys_cachestat] 2025/08/16 18:59:05 coverage filter: attr_wof_frame_info: [attr_wof_frame_info] 2025/08/16 18:59:05 coverage filter: bch2_mark_pagecache_reserved: [bch2_mark_pagecache_reserved] 2025/08/16 18:59:05 coverage filter: bch2_mark_pagecache_unallocated: [bch2_mark_pagecache_unallocated] 2025/08/16 18:59:05 coverage filter: bch2_page_mkwrite: [bch2_page_mkwrite] 2025/08/16 18:59:05 coverage filter: bch2_seek_pagecache_data: [bch2_seek_pagecache_data] 2025/08/16 18:59:05 coverage filter: bio_set_pages_dirty: [bio_set_pages_dirty] 2025/08/16 18:59:05 coverage filter: block_page_mkwrite: [block_page_mkwrite] 2025/08/16 18:59:05 coverage filter: btrfs_cleanup_bg_io: [btrfs_cleanup_bg_io] 2025/08/16 18:59:05 coverage filter: btrfs_cleanup_one_transaction: [btrfs_cleanup_one_transaction] 2025/08/16 18:59:05 coverage filter: btrfs_cleanup_transaction: [btrfs_cleanup_transaction] 2025/08/16 18:59:05 coverage filter: btrfs_clear_buffer_dirty: [btrfs_clear_buffer_dirty] 2025/08/16 18:59:05 coverage filter: btrfs_defrag_file: [btrfs_defrag_file] 2025/08/16 18:59:05 coverage filter: btrfs_drop_and_free_fs_root: [btrfs_drop_and_free_fs_root] 2025/08/16 18:59:05 coverage filter: btrfs_mark_buffer_dirty: [btrfs_mark_buffer_dirty] 2025/08/16 18:59:05 coverage filter: btrfs_page_mkwrite: [btrfs_page_mkwrite] 2025/08/16 18:59:05 coverage filter: btrfs_read_merkle_tree_page: [btrfs_read_merkle_tree_page] 2025/08/16 18:59:05 coverage filter: btrfs_replay_log: [btrfs_replay_log] 2025/08/16 18:59:05 coverage filter: btrfs_truncate_block: [btrfs_truncate_block] 2025/08/16 18:59:05 coverage filter: btrfs_writepage_fixup_worker: [btrfs_writepage_fixup_worker] 2025/08/16 18:59:05 coverage filter: btrfs_writepages: [btrfs_writepages] 2025/08/16 18:59:05 coverage filter: ceph_page_mkwrite: [ceph_page_mkwrite] 2025/08/16 18:59:05 coverage filter: ceph_uninline_data: [ceph_uninline_data] 2025/08/16 18:59:05 coverage filter: ceph_writepages_start: [ceph_writepages_start] 2025/08/16 18:59:05 coverage filter: 
clean_bdev_aliases: [clean_bdev_aliases] 2025/08/16 18:59:05 coverage filter: cpu_vm_stats_fold: [] 2025/08/16 18:59:05 coverage filter: delete_from_page_cache_batch: [delete_from_page_cache_batch] 2025/08/16 18:59:05 coverage filter: do_convert_inline_dir: [do_convert_inline_dir] 2025/08/16 18:59:05 coverage filter: do_huge_pmd_wp_page: [do_huge_pmd_wp_page] 2025/08/16 18:59:05 coverage filter: do_read_cache_folio: [do_read_cache_folio] 2025/08/16 18:59:05 coverage filter: do_swap_page: [do_swap_page] 2025/08/16 18:59:05 coverage filter: do_wp_page: [do_wp_page] 2025/08/16 18:59:05 coverage filter: drain_obj_stock: [] 2025/08/16 18:59:05 coverage filter: ecryptfs_write: [ecryptfs_write ecryptfs_write_begin ecryptfs_write_crypt_stat_flags ecryptfs_write_end ecryptfs_write_header_metadata ecryptfs_write_inode_size_to_metadata ecryptfs_write_lower ecryptfs_write_lower_page_segment ecryptfs_write_metadata ecryptfs_write_packet_length ecryptfs_write_tag_70_packet ecryptfs_writepages] 2025/08/16 18:59:05 coverage filter: ext4_page_mkwrite: [ext4_page_mkwrite] 2025/08/16 18:59:05 coverage filter: ext4_write_begin: [__probestub_ext4_write_begin __traceiter_ext4_write_begin ext4_write_begin] 2025/08/16 18:59:05 coverage filter: f2fs_delete_entry: [f2fs_delete_entry] 2025/08/16 18:59:05 coverage filter: f2fs_delete_inline_entry: [f2fs_delete_inline_entry] 2025/08/16 18:59:05 coverage filter: f2fs_flush_inline_data: [f2fs_flush_inline_data] 2025/08/16 18:59:05 coverage filter: f2fs_fsync_node_pages: [f2fs_fsync_node_pages] 2025/08/16 18:59:05 coverage filter: f2fs_get_lock_data_folio: [f2fs_get_lock_data_folio] 2025/08/16 18:59:05 coverage filter: f2fs_init_inode_metadata: [f2fs_init_inode_metadata] 2025/08/16 18:59:05 coverage filter: f2fs_invalidate_compress_pages: [f2fs_invalidate_compress_pages f2fs_invalidate_compress_pages_range] 2025/08/16 18:59:05 coverage filter: f2fs_quota_read: [f2fs_quota_read] 2025/08/16 18:59:05 coverage filter: f2fs_recover_fsync_data: [f2fs_recover_fsync_data] 2025/08/16 18:59:05 coverage filter: f2fs_set_link: [f2fs_set_link] 2025/08/16 18:59:05 coverage filter: f2fs_sync_meta_pages: [f2fs_sync_meta_pages] 2025/08/16 18:59:05 coverage filter: f2fs_sync_node_pages: [f2fs_sync_node_pages] 2025/08/16 18:59:05 coverage filter: f2fs_truncate_inode_blocks: [__probestub_f2fs_truncate_inode_blocks_enter __probestub_f2fs_truncate_inode_blocks_exit __traceiter_f2fs_truncate_inode_blocks_enter __traceiter_f2fs_truncate_inode_blocks_exit f2fs_truncate_inode_blocks trace_f2fs_truncate_inode_blocks_exit] 2025/08/16 18:59:05 coverage filter: f2fs_vm_page_mkwrite: [__probestub_f2fs_vm_page_mkwrite __traceiter_f2fs_vm_page_mkwrite f2fs_vm_page_mkwrite] 2025/08/16 18:59:05 coverage filter: f2fs_write_begin: [__bpf_trace_f2fs_write_begin __probestub_f2fs_write_begin __traceiter_f2fs_write_begin f2fs_write_begin perf_trace_f2fs_write_begin trace_event_raw_event_f2fs_write_begin trace_raw_output_f2fs_write_begin] 2025/08/16 18:59:05 coverage filter: f2fs_write_data_pages: [f2fs_write_data_pages] 2025/08/16 18:59:05 coverage filter: f2fs_write_multi_pages: [f2fs_write_multi_pages] 2025/08/16 18:59:05 coverage filter: fb_deferred_io_mkwrite: [fb_deferred_io_mkwrite] 2025/08/16 18:59:05 coverage filter: filemap_add_folio: [__filemap_add_folio filemap_add_folio] 2025/08/16 18:59:05 coverage filter: filemap_fault: [__bpf_trace_mm_filemap_fault __probestub_f2fs_filemap_fault __probestub_mm_filemap_fault __traceiter_f2fs_filemap_fault __traceiter_mm_filemap_fault ceph_filemap_fault 
f2fs_filemap_fault filemap_fault filemap_fault_recheck_pte_none perf_trace_mm_filemap_fault trace_event_raw_event_mm_filemap_fault trace_raw_output_mm_filemap_fault xfs_filemap_fault] 2025/08/16 18:59:05 coverage filter: filemap_get_entry: [filemap_get_entry] 2025/08/16 18:59:05 coverage filter: filemap_get_folios_contig: [filemap_get_folios_contig] 2025/08/16 18:59:05 coverage filter: filemap_get_folios_tag: [filemap_get_folios_tag] 2025/08/16 18:59:05 coverage filter: filemap_get_read_batch: [filemap_get_read_batch] 2025/08/16 18:59:05 coverage filter: filemap_mod_uncharged_vmstat: [filemap_mod_uncharged_vmstat] 2025/08/16 18:59:05 coverage filter: filemap_page_mkwrite: [filemap_page_mkwrite xfs_filemap_page_mkwrite zonefs_filemap_page_mkwrite] 2025/08/16 18:59:05 coverage filter: filemap_read: [filemap_read filemap_read_folio] 2025/08/16 18:59:05 coverage filter: filemap_splice_read: [__probestub_filemap_splice_read_ret __traceiter_filemap_splice_read_ret filemap_splice_read] 2025/08/16 18:59:05 coverage filter: filemap_unaccount_folio: [filemap_unaccount_folio] 2025/08/16 18:59:05 coverage filter: find_get_block_common: [find_get_block_common] 2025/08/16 18:59:05 coverage filter: find_get_entries: [find_get_entries] 2025/08/16 18:59:05 coverage filter: find_lock_entries: [find_lock_entries] 2025/08/16 18:59:05 coverage filter: folio_lock: [__folio_lock __folio_lock_killable __folio_lock_or_retry folio_lock folio_lock folio_lock folio_lock folio_lock folio_lock folio_lock folio_lock_anon_vma_read] 2025/08/16 18:59:05 coverage filter: folio_mark_dirty_lock: [folio_mark_dirty_lock] 2025/08/16 18:59:05 coverage filter: force_metapage: [force_metapage] 2025/08/16 18:59:05 coverage filter: free_percpu: [__bpf_trace_percpu_free_percpu __free_percpu_irq __probestub_percpu_free_percpu __traceiter_percpu_free_percpu devm_free_percpu free_percpu free_percpu_irq free_percpu_nmi metadata_dst_free_percpu perf_trace_percpu_free_percpu trace_event_raw_event_percpu_free_percpu trace_percpu_free_percpu trace_raw_output_percpu_free_percpu] 2025/08/16 18:59:05 coverage filter: fuse_page_mkwrite: [fuse_page_mkwrite] 2025/08/16 18:59:05 coverage filter: generic_perform_write: [generic_perform_write] 2025/08/16 18:59:05 coverage filter: generic_pipe_buf_try_steal: [generic_pipe_buf_try_steal] 2025/08/16 18:59:05 coverage filter: gfs2_page_mkwrite: [gfs2_page_mkwrite] 2025/08/16 18:59:05 coverage filter: gfs2_trans_add_meta: [gfs2_trans_add_meta] 2025/08/16 18:59:05 coverage filter: gfs2_write_cache_jdata: [gfs2_write_cache_jdata] 2025/08/16 18:59:05 coverage filter: grab_metapage: [grab_metapage] 2025/08/16 18:59:05 coverage filter: hold_metapage: [hold_metapage] 2025/08/16 18:59:05 coverage filter: invalidate_inode_pages2_range: [invalidate_inode_pages2_range] 2025/08/16 18:59:05 coverage filter: io_ctl_prepare_pages: [io_ctl_prepare_pages] 2025/08/16 18:59:05 coverage filter: iomap_page_mkwrite: [iomap_page_mkwrite] 2025/08/16 18:59:05 coverage filter: ksm_do_scan: [ksm_do_scan] 2025/08/16 18:59:05 coverage filter: ksm_get_folio: [ksm_get_folio] 2025/08/16 18:59:05 coverage filter: lock_delalloc_folios: [lock_delalloc_folios] 2025/08/16 18:59:05 coverage filter: lock_metapage: [lock_metapage] 2025/08/16 18:59:05 coverage filter: lock_page: [lock_page lock_page lock_page trylock_page unlock_page] 2025/08/16 18:59:05 coverage filter: lruvec_page_state: [] 2025/08/16 18:59:05 coverage filter: lruvec_page_state_local: [] 2025/08/16 18:59:05 coverage filter: make_device_exclusive: [make_device_exclusive] 
2025/08/16 18:59:05 coverage filter: mapping_seek_hole_data: [mapping_seek_hole_data] 2025/08/16 18:59:05 coverage filter: mem_cgroup_charge_skmem: [] 2025/08/16 18:59:05 coverage filter: mem_cgroup_uncharge_skmem: [] 2025/08/16 18:59:05 coverage filter: memcg1_stat_format: [] 2025/08/16 18:59:05 coverage filter: memcg1_swapout: [] 2025/08/16 18:59:05 coverage filter: memcg_page_state: [mod_memcg_page_state] 2025/08/16 18:59:05 coverage filter: memcg_page_state_local: [] 2025/08/16 18:59:05 coverage filter: memcg_page_state_local_output: [] 2025/08/16 18:59:05 coverage filter: memcg_page_state_output: [] 2025/08/16 18:59:05 coverage filter: memory_numa_stat_show: [] 2025/08/16 18:59:05 coverage filter: memory_stat_format: [] 2025/08/16 18:59:05 coverage filter: memtier_hotplug_callback: [memtier_hotplug_callback] 2025/08/16 18:59:05 coverage filter: migrate_device_coherent_folio: [migrate_device_coherent_folio] 2025/08/16 18:59:05 coverage filter: migrate_pages: [__bpf_trace_mm_migrate_pages __bpf_trace_mm_migrate_pages_start __ia32_sys_migrate_pages __probestub_mm_migrate_pages __probestub_mm_migrate_pages_start __se_sys_migrate_pages __traceiter_mm_migrate_pages __traceiter_mm_migrate_pages_start __x64_sys_migrate_pages damon_migrate_pages do_migrate_pages migrate_pages migrate_pages_batch perf_trace_mm_migrate_pages perf_trace_mm_migrate_pages_start trace_event_raw_event_mm_migrate_pages trace_event_raw_event_mm_migrate_pages_start trace_raw_output_mm_migrate_pages trace_raw_output_mm_migrate_pages_start] 2025/08/16 18:59:05 coverage filter: migrate_pages_batch: [] 2025/08/16 18:59:05 coverage filter: minix_add_link: [minix_add_link] 2025/08/16 18:59:05 coverage filter: minix_delete_entry: [minix_delete_entry] 2025/08/16 18:59:05 coverage filter: minix_set_link: [minix_set_link] 2025/08/16 18:59:05 coverage filter: mod_memcg_lruvec_state: [] 2025/08/16 18:59:05 coverage filter: mod_memcg_page_state: [] 2025/08/16 18:59:05 coverage filter: mod_memcg_state: [] 2025/08/16 18:59:05 coverage filter: mod_node_state: [] 2025/08/16 18:59:05 coverage filter: move_pages_huge_pmd: [move_pages_huge_pmd] 2025/08/16 18:59:05 coverage filter: move_pages_pte: [move_pages_pte] 2025/08/16 18:59:05 coverage filter: mpage_prepare_extent_to_map: [mpage_prepare_extent_to_map] 2025/08/16 18:59:05 coverage filter: need_update: [] 2025/08/16 18:59:05 coverage filter: netfs_page_mkwrite: [netfs_page_mkwrite] 2025/08/16 18:59:05 coverage filter: next_uptodate_folio: [next_uptodate_folio] 2025/08/16 18:59:05 coverage filter: nfs_vm_page_mkwrite: [nfs_vm_page_mkwrite] 2025/08/16 18:59:05 coverage filter: nilfs_add_link: [nilfs_add_link] 2025/08/16 18:59:05 coverage filter: nilfs_begin_folio_io: [nilfs_begin_folio_io] 2025/08/16 18:59:05 coverage filter: nilfs_btnode_delete: [nilfs_btnode_delete] 2025/08/16 18:59:05 coverage filter: nilfs_btnode_prepare_change_key: [nilfs_btnode_prepare_change_key] 2025/08/16 18:59:05 coverage filter: nilfs_clear_dirty_pages: [nilfs_clear_dirty_pages] 2025/08/16 18:59:05 coverage filter: nilfs_copy_back_pages: [nilfs_copy_back_pages] 2025/08/16 18:59:05 coverage filter: nilfs_copy_dirty_pages: [nilfs_copy_dirty_pages] 2025/08/16 18:59:05 coverage filter: nilfs_delete_entry: [nilfs_delete_entry] 2025/08/16 18:59:05 coverage filter: nilfs_end_folio_io: [nilfs_end_folio_io] 2025/08/16 18:59:05 coverage filter: nilfs_find_uncommitted_extent: [nilfs_find_uncommitted_extent] 2025/08/16 18:59:05 coverage filter: nilfs_lookup_dirty_data_buffers: [nilfs_lookup_dirty_data_buffers] 2025/08/16 
18:59:05 coverage filter: nilfs_page_mkwrite: [nilfs_page_mkwrite] 2025/08/16 18:59:05 coverage filter: nilfs_segctor_do_construct: [nilfs_segctor_do_construct] 2025/08/16 18:59:05 coverage filter: nilfs_set_link: [nilfs_set_link] 2025/08/16 18:59:05 coverage filter: node_get_allowed_targets: [node_get_allowed_targets] 2025/08/16 18:59:05 coverage filter: node_is_toptier: [node_is_toptier] 2025/08/16 18:59:05 coverage filter: node_page_state: [] 2025/08/16 18:59:05 coverage filter: node_page_state_add: [] 2025/08/16 18:59:05 coverage filter: node_page_state_pages: [] 2025/08/16 18:59:05 coverage filter: node_read_vmstat: [node_read_vmstat] 2025/08/16 18:59:05 coverage filter: ntfs_bio_fill_1: [ntfs_bio_fill_1] 2025/08/16 18:59:05 coverage filter: nvmet_execute_disc_identify: [nvmet_execute_disc_identify] 2025/08/16 18:59:05 coverage filter: obj_cgroup_charge_pages: [] 2025/08/16 18:59:05 coverage filter: obj_cgroup_charge_zswap: [] 2025/08/16 18:59:05 coverage filter: obj_cgroup_may_zswap: [] 2025/08/16 18:59:05 coverage filter: obj_cgroup_release: [] 2025/08/16 18:59:05 coverage filter: obj_cgroup_uncharge_zswap: [] 2025/08/16 18:59:05 coverage filter: ocfs2_duplicate_clusters_by_page: [__probestub_ocfs2_duplicate_clusters_by_page __traceiter_ocfs2_duplicate_clusters_by_page ocfs2_duplicate_clusters_by_page trace_ocfs2_duplicate_clusters_by_page] 2025/08/16 18:59:05 coverage filter: ocfs2_grab_folios_for_write: [ocfs2_grab_folios_for_write] 2025/08/16 18:59:05 coverage filter: orangefs_page_mkwrite: [orangefs_page_mkwrite] 2025/08/16 18:59:05 coverage filter: page_cache_pipe_buf_confirm: [page_cache_pipe_buf_confirm] 2025/08/16 18:59:05 coverage filter: page_cache_pipe_buf_try_steal: [page_cache_pipe_buf_try_steal] 2025/08/16 18:59:05 coverage filter: pagetypeinfo_showfree_print: [] 2025/08/16 18:59:05 coverage filter: pcpu_memcg_post_alloc_hook: [pcpu_memcg_post_alloc_hook] 2025/08/16 18:59:05 coverage filter: prepare_one_folio: [prepare_one_folio] 2025/08/16 18:59:05 coverage filter: putback_movable_ops_page: [putback_movable_ops_page] 2025/08/16 18:59:05 coverage filter: refill_obj_stock: [] 2025/08/16 18:59:05 coverage filter: refresh_cpu_vm_stats: [] 2025/08/16 18:59:05 coverage filter: release_metapage: [release_metapage] 2025/08/16 18:59:05 coverage filter: relocate_file_extent_cluster: [relocate_file_extent_cluster] 2025/08/16 18:59:05 coverage filter: remove_device_exclusive_entry: [remove_device_exclusive_entry] 2025/08/16 18:59:05 coverage filter: remove_inode_hugepages: [remove_inode_hugepages] 2025/08/16 18:59:05 coverage filter: send_extent_data: [send_extent_data] 2025/08/16 18:59:05 coverage filter: set_extent_buffer_dirty: [set_extent_buffer_dirty] 2025/08/16 18:59:05 coverage filter: set_node_memory_tier: [set_node_memory_tier] 2025/08/16 18:59:05 coverage filter: shmem_get_folio_gfp: [shmem_get_folio_gfp] 2025/08/16 18:59:05 coverage filter: shmem_get_partial_folio: [shmem_get_partial_folio] 2025/08/16 18:59:05 coverage filter: shmem_swapin_folio: [shmem_swapin_folio] 2025/08/16 18:59:05 coverage filter: shmem_undo_range: [shmem_undo_range] 2025/08/16 18:59:05 coverage filter: shrink_folio_list: [shrink_folio_list] 2025/08/16 18:59:05 coverage filter: split_huge_pages_all: [split_huge_pages_all] 2025/08/16 18:59:05 coverage filter: truncate_inode_pages_range: [truncate_inode_pages_range] 2025/08/16 18:59:05 coverage filter: try_split_folio: [try_split_folio] 2025/08/16 18:59:05 coverage filter: try_to_unuse: [try_to_unuse] 2025/08/16 18:59:05 coverage filter: 
ttm_backup_backup_page: [ttm_backup_backup_page] 2025/08/16 18:59:05 coverage filter: txLock: [txLock txLockAlloc] 2025/08/16 18:59:05 coverage filter: ubifs_vm_page_mkwrite: [ubifs_vm_page_mkwrite] 2025/08/16 18:59:05 coverage filter: udf_expand_file_adinicb: [udf_expand_file_adinicb] 2025/08/16 18:59:05 coverage filter: udf_page_mkwrite: [udf_page_mkwrite] 2025/08/16 18:59:05 coverage filter: ufs_add_link: [ufs_add_link] 2025/08/16 18:59:05 coverage filter: ufs_delete_entry: [ufs_delete_entry] 2025/08/16 18:59:05 coverage filter: ufs_get_locked_folio: [ufs_get_locked_folio] 2025/08/16 18:59:05 coverage filter: ufs_set_link: [ufs_set_link] 2025/08/16 18:59:05 coverage filter: uncharge_batch: [] 2025/08/16 18:59:05 coverage filter: unpin_user_page_range_dirty_lock: [unpin_user_page_range_dirty_lock] 2025/08/16 18:59:05 coverage filter: unpin_user_pages_dirty_lock: [unpin_user_pages_dirty_lock] 2025/08/16 18:59:05 coverage filter: vfs_dedupe_file_range_compare: [vfs_dedupe_file_range_compare] 2025/08/16 18:59:05 coverage filter: vmstat_next: [] 2025/08/16 18:59:05 coverage filter: vmstat_refresh: [] 2025/08/16 18:59:05 coverage filter: vmstat_shepherd: [] 2025/08/16 18:59:05 coverage filter: vmstat_show: [] 2025/08/16 18:59:05 coverage filter: vmstat_start: [] 2025/08/16 18:59:05 coverage filter: write_all_supers: [write_all_supers] 2025/08/16 18:59:05 coverage filter: write_one_eb: [write_one_eb] 2025/08/16 18:59:05 coverage filter: writeback_iter: [writeback_iter] 2025/08/16 18:59:05 coverage filter: z_erofs_runqueue: [z_erofs_runqueue] 2025/08/16 18:59:05 coverage filter: zone_device_page_init: [zone_device_page_init] 2025/08/16 18:59:05 coverage filter: zoneinfo_show_print: [] 2025/08/16 18:59:05 coverage filter: zswap_current_read: [] 2025/08/16 18:59:05 coverage filter: zswap_shrinker_count: [zswap_shrinker_count] 2025/08/16 18:59:05 coverage filter: fs/btrfs/disk-io.c: [fs/btrfs/disk-io.c] 2025/08/16 18:59:05 coverage filter: include/linux/mmzone.h: [] 2025/08/16 18:59:05 coverage filter: include/linux/pagemap.h: [] 2025/08/16 18:59:05 coverage filter: mm/filemap.c: [mm/filemap.c] 2025/08/16 18:59:05 coverage filter: mm/vmstat.c: [] 2025/08/16 18:59:05 area "symbols": 15928 PCs in the cover filter 2025/08/16 18:59:05 area "files": 5774 PCs in the cover filter 2025/08/16 18:59:05 area "": 0 PCs in the cover filter 2025/08/16 18:59:05 executor cover filter: 0 PCs 2025/08/16 18:59:09 machine check: disabled the following syscalls: fsetxattr$security_selinux : selinux is not enabled fsetxattr$security_smack_transmute : smack is not enabled fsetxattr$smack_xattr_label : smack is not enabled get_thread_area : syscall get_thread_area is not present lookup_dcookie : syscall lookup_dcookie is not present lsetxattr$security_selinux : selinux is not enabled lsetxattr$security_smack_transmute : smack is not enabled lsetxattr$smack_xattr_label : smack is not enabled mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open 
/dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to 
open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled set_thread_area : syscall set_thread_area is not present setxattr$security_selinux : selinux is not enabled setxattr$security_smack_transmute : smack is not enabled 
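(Annotation, not part of the log.) The pkey_alloc line above deserves a note: per pkey_alloc(2), ENOSPC ("no space left on device") is returned not only when all protection keys are already allocated but also when the CPU or kernel has no memory-protection-key support at all, so the message here need not indicate key exhaustion. A minimal stand-alone probe in the same spirit as these runtime checks (illustrative only, not syzkaller's own checking code):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void) {
        /* Mirrors the probe summarized as pkey_alloc(0x0, 0x0): no flags,
         * default access rights. */
        int pkey = pkey_alloc(0, 0);
        if (pkey < 0) {
            /* ENOSPC here can simply mean "no protection keys on this CPU/kernel". */
            printf("pkey_alloc: disabled (%s)\n", strerror(errno));
            return 1;
        }
        pkey_free(pkey);
        printf("pkey_alloc: supported\n");
        return 0;
    }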
setxattr$smack_xattr_label : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported syz_kvm_setup_cpu$ppc64 : unsupported arch syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device uselib : syscall uselib is not present write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled transitively disabled the following syscalls (missing resource [creating syscalls]): bind$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] close$ibv_device : fd_rdma [openat$uverbs0] connect$hf : sock_hf [socket$hf] connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem] ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_NAME : fd_ashmem 
[openat$ashmem] ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem] ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20] ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20] ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20] ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20] ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20] ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20] ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GET_PARAM : 
fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue 
[openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue] ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy] ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] 
ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost 
[openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali] ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_ASSIGN_SET_MSIX_NR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 
: fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm] ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm] ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm] ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] 
ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm] ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_GUEST_DEBUG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] 
ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_CANCEL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm] ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_XEN_HVM_CONFIG : 
fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0] ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw] ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device] ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb] ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure] ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km] ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage] ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio] ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio] ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm] ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] pkey_free : pkey [pkey_alloc] pkey_mprotect : pkey [pkey_alloc] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
recvmsg$hf : sock_hf [socket$hf] sendmsg$hf : sock_hf [socket$hf] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86] syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU] syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 3832/8048 2025/08/16 18:59:09 new: machine check complete 2025/08/16 18:59:10 new: adding 77726 seeds 2025/08/16 18:59:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 18:59:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:00:05 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:00:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:00:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:00:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:00:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:00:43 runner 6 connected 2025/08/16 19:00:54 runner 1 connected 2025/08/16 19:00:54 runner 5 connected 2025/08/16 19:01:07 runner 9 connected 2025/08/16 19:01:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:17 runner 8 connected 2025/08/16 19:01:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:24 runner 0 connected 2025/08/16 19:01:30 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:30 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:40 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:01:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:01:56 runner 2 connected 2025/08/16 19:01:57 runner 6 connected 2025/08/16 19:02:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:02:05 runner 5 connected 2025/08/16 19:02:06 patched crashed: lost connection to test machine [need repro = false] 2025/08/16 19:02:08 runner 1 connected 2025/08/16 19:02:11 runner 9 connected 2025/08/16 19:02:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:18 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:02:21 runner 8 connected 2025/08/16 19:02:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:02:43 base crash: lost connection to test machine 2025/08/16 19:02:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:46 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:02:50 runner 0 connected 2025/08/16 19:02:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:02:54 runner 4 connected 2025/08/16 19:02:57 STAT { "buffer too small": 0, "candidate triage jobs": 225, "candidates": 75604, "comps overflows": 0, "corpus": 1883, "corpus [files]": 503, "corpus [symbols]": 1085, "cover overflows": 1287, "coverage": 119499, "distributor delayed": 3892, "distributor undelayed": 3672, "distributor violated": 576, "exec candidate": 2122, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 9787, "exec total [new]": 9600, "exec triage": 6285, "executor restarts": 112, "fault jobs": 0, "fuzzer jobs": 225, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 126718, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 2122, "no exec duration": 232293000000, "no exec requests": 944, "pending": 17, "prog exec time": 176, "reproducing": 0, "rpc recv": 972580908, "rpc sent": 72601520, "signal": 117899, "smash jobs": 0, "triage jobs": 0, "vm output": 1168629, "vm restarts [base]": 4, "vm restarts [new]": 22 } 2025/08/16 19:02:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:02:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:03:07 runner 2 connected 2025/08/16 19:03:16 runner 6 connected 2025/08/16 19:03:25 base crash: KASAN: slab-use-after-free Read in xfrm_state_find 2025/08/16 19:03:31 runner 0 connected 2025/08/16 19:03:36 runner 9 connected 2025/08/16 19:03:42 runner 1 connected 2025/08/16 19:03:46 runner 8 connected 2025/08/16 19:04:05 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 19:04:14 runner 2 connected 2025/08/16 19:04:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:04:19 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 19:04:28 patched crashed: WARNING in xfrm_state_fini [need repro = true] 2025/08/16 19:04:28 scheduled a reproduction of 'WARNING in xfrm_state_fini' 2025/08/16 19:04:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:04:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:04:54 runner 1 connected 2025/08/16 19:05:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:09 runner 8 connected 2025/08/16 19:05:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:17 runner 4 connected 2025/08/16 19:05:20 runner 5 connected 2025/08/16 19:05:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:05:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:05:51 runner 6 connected 2025/08/16 19:05:58 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = true] 2025/08/16 19:05:58 scheduled a reproduction of 'possible deadlock in ocfs2_try_remove_refcount_tree' 2025/08/16 19:06:02 runner 2 connected 2025/08/16 19:06:02 runner 0 connected 2025/08/16 19:06:04 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:06:09 runner 9 connected 2025/08/16 19:06:18 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:06:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:06:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:06:30 runner 5 connected 2025/08/16 19:06:36 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:06:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:06:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:06:40 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:06:47 runner 8 connected 2025/08/16 19:06:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:06:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:06:53 runner 1 connected 2025/08/16 19:06:59 runner 0 connected 2025/08/16 19:07:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:07:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:07:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:06 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 19:07:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:07:16 runner 4 connected 2025/08/16 19:07:24 runner 3 connected 2025/08/16 19:07:26 runner 9 connected 2025/08/16 19:07:29 runner 2 connected 2025/08/16 19:07:33 runner 6 connected 2025/08/16 19:07:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:07:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:07:52 runner 2 connected 2025/08/16 19:07:54 runner 5 connected 2025/08/16 19:07:57 STAT { "buffer too small": 0, "candidate triage jobs": 47, "candidates": 73592, "comps overflows": 0, "corpus": 4058, "corpus [files]": 993, "corpus [symbols]": 2058, "cover overflows": 2577, "coverage": 155628, "distributor delayed": 8431, "distributor undelayed": 8393, "distributor violated": 703, "exec candidate": 4134, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 18517, "exec total [new]": 18548, "exec triage": 12882, "executor restarts": 183, "fault jobs": 0, "fuzzer jobs": 47, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 158744, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 4134, "no exec duration": 277914000000, "no exec requests": 1086, "pending": 37, "prog exec time": 0, "reproducing": 0, "rpc recv": 1959446028, "rpc sent": 137327088, "signal": 154054, "smash jobs": 0, "triage jobs": 0, "vm output": 2849933, "vm restarts [base]": 11, "vm restarts [new]": 41 } 2025/08/16 19:07:57 runner 8 connected 2025/08/16 19:07:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:07:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:02 new: boot error: can't ssh into the instance 2025/08/16 19:08:03 new: boot error: can't ssh into the instance 2025/08/16 19:08:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:08:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:08:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:24 runner 4 connected 2025/08/16 19:08:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:08:26 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:29 runner 9 connected 2025/08/16 19:08:44 runner 7 connected 2025/08/16 19:08:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:08:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:44 runner 3 connected 2025/08/16 19:08:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:08:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:08:55 runner 5 
connected 2025/08/16 19:08:55 runner 2 connected 2025/08/16 19:09:07 runner 8 connected 2025/08/16 19:09:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:33 runner 4 connected 2025/08/16 19:09:35 runner 9 connected 2025/08/16 19:09:54 runner 3 connected 2025/08/16 19:09:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:09:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:09:58 runner 7 connected 2025/08/16 19:10:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:10:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:10:08 runner 2 connected 2025/08/16 19:10:08 runner 5 connected 2025/08/16 19:10:13 runner 8 connected 2025/08/16 19:10:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:10:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:10:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:10:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:10:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:10:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:10:37 runner 4 connected 2025/08/16 19:10:41 runner 9 connected 2025/08/16 19:10:50 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/16 19:10:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:10:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:04 runner 3 connected 2025/08/16 19:11:08 runner 7 connected 2025/08/16 19:11:12 runner 2 connected 2025/08/16 19:11:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:38 runner 2 connected 2025/08/16 19:11:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:38 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 19:11:40 runner 5 connected 2025/08/16 19:11:40 base crash: possible deadlock in ocfs2_init_acl 2025/08/16 19:11:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:11:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:11:51 runner 8 connected 2025/08/16 19:12:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:12:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:12:05 runner 4 connected 2025/08/16 19:12:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:12:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:12:16 runner 9 connected 2025/08/16 19:12:17 runner 7 connected 2025/08/16 19:12:20 runner 2 connected 2025/08/16 19:12:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:12:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:12:36 base crash: possible deadlock in ocfs2_init_acl 2025/08/16 19:12:45 runner 5 connected 2025/08/16 19:12:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:12:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:12:51 runner 8 connected 2025/08/16 19:12:57 STAT { "buffer too small": 0, "candidate triage jobs": 281, "candidates": 72613, "comps overflows": 0, "corpus": 4784, "corpus [files]": 1148, "corpus [symbols]": 2342, "cover overflows": 3009, "coverage": 163431, "distributor delayed": 10857, "distributor undelayed": 10588, "distributor violated": 703, "exec candidate": 5113, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 22195, "exec total [new]": 22261, "exec triage": 15448, "executor restarts": 278, "fault jobs": 0, "fuzzer jobs": 281, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 171075, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 5113, "no exec duration": 707800000000, "no exec requests": 2021, "pending": 64, "prog exec time": 420, "reproducing": 0, "rpc recv": 2965020732, "rpc sent": 202065360, "signal": 161810, "smash jobs": 0, "triage jobs": 0, "vm output": 4004634, "vm restarts [base]": 12, "vm restarts [new]": 69 } 2025/08/16 19:12:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:12:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:13:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:13:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:13:13 runner 4 connected 2025/08/16 19:13:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:13:22 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:13:24 runner 3 connected 2025/08/16 19:13:36 runner 9 connected 2025/08/16 19:13:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:13:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:13:48 runner 2 connected 2025/08/16 19:13:48 
runner 7 connected 2025/08/16 19:13:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:13:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:14:03 runner 5 connected 2025/08/16 19:14:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:14:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:14:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:14:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:14:20 runner 8 connected 2025/08/16 19:14:25 patched crashed: lost connection to test machine [need repro = false] 2025/08/16 19:14:30 runner 4 connected 2025/08/16 19:14:51 runner 9 connected 2025/08/16 19:15:08 new: boot error: can't ssh into the instance 2025/08/16 19:15:09 runner 2 connected 2025/08/16 19:15:15 runner 7 connected 2025/08/16 19:15:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:15:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:15:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:15:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:15:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:15:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:15:58 runner 1 connected 2025/08/16 19:16:24 runner 4 connected 2025/08/16 19:16:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:16:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:16:51 runner 2 connected 2025/08/16 19:16:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:16:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:17:09 new: boot error: can't ssh into the instance 2025/08/16 19:17:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:17:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:17:23 runner 1 connected 2025/08/16 19:17:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:17:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:17:44 runner 4 connected 2025/08/16 19:17:57 STAT { "buffer too small": 0, "candidate triage jobs": 61, "candidates": 71800, "comps overflows": 0, "corpus": 5805, "corpus [files]": 1326, "corpus [symbols]": 2709, "cover overflows": 3586, "coverage": 173726, "distributor delayed": 13033, "distributor undelayed": 12990, "distributor violated": 706, "exec candidate": 5926, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 6, "exec seeds": 0, "exec smash": 0, "exec total [base]": 26018, "exec total [new]": 26184, "exec triage": 18301, "executor restarts": 337, "fault jobs": 0, "fuzzer jobs": 61, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 0, "max signal": 177987, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 5926, "no exec duration": 882511000000, "no exec requests": 2420, "pending": 78, "prog exec time": 
253, "reproducing": 0, "rpc recv": 3572159164, "rpc sent": 248480848, "signal": 171998, "smash jobs": 0, "triage jobs": 0, "vm output": 4961252, "vm restarts [base]": 13, "vm restarts [new]": 84 } 2025/08/16 19:17:58 runner 0 connected 2025/08/16 19:18:01 runner 8 connected 2025/08/16 19:18:04 new: boot error: can't ssh into the instance 2025/08/16 19:18:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:18:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:18:26 runner 2 connected 2025/08/16 19:18:53 runner 6 connected 2025/08/16 19:19:02 runner 9 connected 2025/08/16 19:19:12 base crash: KASAN: slab-use-after-free Read in __xfrm_state_lookup 2025/08/16 19:19:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:19:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:19:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:20:00 runner 1 connected 2025/08/16 19:20:01 runner 0 connected 2025/08/16 19:20:02 runner 4 connected 2025/08/16 19:20:12 runner 8 connected 2025/08/16 19:20:14 runner 5 connected 2025/08/16 19:20:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:20:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:20:37 runner 1 connected 2025/08/16 19:20:42 runner 6 connected 2025/08/16 19:20:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:20:46 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:20:47 runner 9 connected 2025/08/16 19:20:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:20:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:20:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:20:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:20:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:20:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:21:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:21:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:21:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:21:09 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 19:21:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:21:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:21:22 runner 2 connected 2025/08/16 19:21:24 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 19:21:34 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 19:21:34 runner 8 connected 2025/08/16 19:21:41 runner 4 connected 2025/08/16 19:21:46 base: boot error: can't ssh into the instance 2025/08/16 19:21:46 runner 5 connected 2025/08/16 19:21:47 new: boot error: can't ssh into the instance 2025/08/16 19:21:52 runner 1 connected 2025/08/16 19:21:53 runner 9 connected 2025/08/16 19:21:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:21:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:18 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:21 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:22:22 runner 3 connected 2025/08/16 19:22:34 runner 0 connected 2025/08/16 19:22:36 runner 3 connected 2025/08/16 19:22:43 runner 2 connected 2025/08/16 19:22:47 runner 8 connected 2025/08/16 19:22:56 runner 4 connected 2025/08/16 19:22:57 STAT { "buffer too small": 0, "candidate triage jobs": 37, "candidates": 69888, "comps overflows": 0, "corpus": 7725, "corpus [files]": 1583, "corpus [symbols]": 3351, "cover overflows": 4732, "coverage": 187383, "distributor delayed": 17314, "distributor undelayed": 17285, "distributor violated": 811, "exec candidate": 7838, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 9, "exec seeds": 0, "exec smash": 0, "exec total [base]": 34642, "exec total [new]": 34815, "exec triage": 24196, "executor restarts": 401, "fault jobs": 0, "fuzzer jobs": 37, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 191822, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 7838, "no exec duration": 897653000000, "no exec requests": 2469, "pending": 100, "prog exec time": 89, "reproducing": 0, "rpc recv": 4474288324, "rpc sent": 335417808, "signal": 185502, "smash jobs": 0, "triage jobs": 0, "vm output": 6287119, "vm restarts [base]": 16, "vm restarts [new]": 106 } 2025/08/16 19:22:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:22:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:00 runner 9 connected 
2025/08/16 19:23:01 runner 5 connected 2025/08/16 19:23:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:02 runner 1 connected 2025/08/16 19:23:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:37 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:43 runner 2 connected 2025/08/16 19:23:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:23:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:23:45 runner 3 connected 2025/08/16 19:24:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:24:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:24:02 runner 8 connected 2025/08/16 19:24:07 runner 1 connected 2025/08/16 19:24:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:24:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:24:18 runner 4 connected 2025/08/16 19:24:26 runner 9 connected 2025/08/16 19:24:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:24:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:24:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:24:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:24:42 runner 2 connected 2025/08/16 19:24:58 runner 3 connected 2025/08/16 19:24:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:24:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:25:10 runner 8 connected 2025/08/16 19:25:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:25:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:25:29 runner 1 connected 2025/08/16 19:25:41 runner 4 connected 2025/08/16 19:25:50 new: boot error: can't ssh into the instance 2025/08/16 19:25:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:25:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:26:01 runner 9 connected 2025/08/16 19:26:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:26:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:26:38 runner 7 connected 2025/08/16 19:26:45 runner 2 connected 2025/08/16 19:26:45 base crash: general protection fault in pcl818_ai_cancel 2025/08/16 19:26:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:26:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:26:58 patched crashed: WARNING in folio_lruvec_lock_irqsave 
[need repro = true] 2025/08/16 19:26:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:27:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:27:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:27:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:27:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:16 runner 9 connected 2025/08/16 19:27:27 runner 0 connected 2025/08/16 19:27:34 runner 3 connected 2025/08/16 19:27:40 runner 8 connected 2025/08/16 19:27:45 runner 4 connected 2025/08/16 19:27:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:27:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:27:51 runner 2 connected 2025/08/16 19:27:56 runner 7 connected 2025/08/16 19:27:57 STAT { "buffer too small": 0, "candidate triage jobs": 81, "candidates": 68344, "comps overflows": 0, "corpus": 9206, "corpus [files]": 1840, "corpus [symbols]": 3878, "cover overflows": 5665, "coverage": 196758, "distributor delayed": 21230, "distributor undelayed": 21168, "distributor violated": 852, "exec candidate": 9382, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 9, "exec seeds": 0, "exec smash": 0, "exec total [base]": 41342, "exec total [new]": 41660, "exec triage": 28827, "executor restarts": 482, "fault jobs": 0, "fuzzer jobs": 81, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 201428, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9382, "no exec duration": 1194156000000, "no exec requests": 3165, "pending": 122, "prog exec time": 234, "reproducing": 0, "rpc recv": 5318393796, "rpc sent": 414986872, "signal": 194771, "smash jobs": 0, "triage jobs": 0, "vm output": 7597577, "vm restarts [base]": 17, "vm restarts [new]": 129 } 2025/08/16 19:28:00 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/16 19:28:37 runner 9 connected 2025/08/16 19:28:48 runner 3 connected 2025/08/16 19:29:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:29:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:29:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:29:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:29:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:29:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:29:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:29:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:29:18 patched crashed: lost connection to test machine [need repro = false] 2025/08/16 19:29:26 
patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:29:26 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:29:45 runner 4 connected 2025/08/16 19:29:49 runner 7 connected 2025/08/16 19:29:54 runner 3 connected 2025/08/16 19:30:02 runner 8 connected 2025/08/16 19:30:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:30:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:30:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:30:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:30:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:30:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:30:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:30:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:30:53 runner 7 connected 2025/08/16 19:31:02 new: boot error: can't ssh into the instance 2025/08/16 19:31:10 runner 4 connected 2025/08/16 19:31:15 new: boot error: can't ssh into the instance 2025/08/16 19:31:18 runner 3 connected 2025/08/16 19:31:29 runner 8 connected 2025/08/16 19:31:29 base: boot error: can't ssh into the instance 2025/08/16 19:31:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:31:37 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:31:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:31:39 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:31:43 runner 0 connected 2025/08/16 19:31:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:31:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:31:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:31:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:31:56 runner 6 connected 2025/08/16 19:32:10 runner 1 connected 2025/08/16 19:32:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:32:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:32:18 runner 7 connected 2025/08/16 19:32:20 runner 4 connected 2025/08/16 19:32:26 patched crashed: lost connection to test machine [need repro = false] 2025/08/16 19:32:28 runner 3 connected 2025/08/16 19:32:36 runner 8 connected 2025/08/16 19:32:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:32:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:32:57 runner 0 connected 2025/08/16 19:32:57 STAT { "buffer too small": 0, "candidate triage jobs": 211, "candidates": 66941, "comps overflows": 0, "corpus": 10451, "corpus [files]": 2030, "corpus [symbols]": 4287, "cover overflows": 6524, "coverage": 202948, "distributor delayed": 24451, "distributor undelayed": 24258, "distributor violated": 853, "exec candidate": 10785, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 9, "exec seeds": 0, "exec smash": 0, "exec total [base]": 47741, "exec total [new]": 47833, "exec triage": 32928, "executor restarts": 562, "fault jobs": 0, "fuzzer jobs": 211, "fuzzing VMs [base]": 4, 
"fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 208115, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 10785, "no exec duration": 1497575000000, "no exec requests": 3878, "pending": 137, "prog exec time": 238, "reproducing": 0, "rpc recv": 6020635348, "rpc sent": 480608256, "signal": 200882, "smash jobs": 0, "triage jobs": 0, "vm output": 8755829, "vm restarts [base]": 19, "vm restarts [new]": 145 } 2025/08/16 19:33:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:33:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:33:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:33:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:33:15 runner 6 connected 2025/08/16 19:33:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:33:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:33:31 runner 7 connected 2025/08/16 19:33:33 new: boot error: can't ssh into the instance 2025/08/16 19:33:52 runner 4 connected 2025/08/16 19:33:53 runner 8 connected 2025/08/16 19:33:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:33:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:05 runner 3 connected 2025/08/16 19:34:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:14 runner 5 connected 2025/08/16 19:34:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:37 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:44 runner 0 connected 2025/08/16 19:34:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:34:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:34:55 runner 6 connected 2025/08/16 19:35:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:35:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:35:18 runner 8 connected 2025/08/16 19:35:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:35:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:35:24 runner 7 connected 2025/08/16 19:35:29 runner 5 connected 2025/08/16 19:35:30 runner 3 connected 2025/08/16 19:35:35 runner 4 connected 2025/08/16 19:35:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:35:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:35:46 
patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:35:46 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:35:54 runner 0 connected 2025/08/16 19:35:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:35:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:05 runner 6 connected 2025/08/16 19:36:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:36:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:36:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:36:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:25 runner 8 connected 2025/08/16 19:36:27 runner 7 connected 2025/08/16 19:36:39 runner 3 connected 2025/08/16 19:36:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:36:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:36:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:36:53 runner 5 connected 2025/08/16 19:37:00 base crash: WARNING in ext4_xattr_inode_lookup_create 2025/08/16 19:37:00 patched crashed: WARNING in ext4_xattr_inode_lookup_create [need repro = false] 2025/08/16 19:37:04 base crash: WARNING in ext4_xattr_inode_lookup_create 2025/08/16 19:37:05 patched crashed: WARNING in ext4_xattr_inode_lookup_create [need repro = false] 2025/08/16 19:37:05 runner 4 connected 2025/08/16 19:37:07 runner 0 connected 2025/08/16 19:37:09 new: boot error: can't ssh into the instance 2025/08/16 19:37:15 base crash: WARNING in ext4_xattr_inode_lookup_create 2025/08/16 19:37:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:37:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:37:26 patched crashed: WARNING in ext4_xattr_inode_lookup_create [need repro = false] 2025/08/16 19:37:28 patched crashed: WARNING in ext4_xattr_inode_lookup_create [need repro = false] 2025/08/16 19:37:28 base crash: WARNING in ext4_xattr_inode_lookup_create 2025/08/16 19:37:33 runner 8 connected 2025/08/16 19:37:39 runner 6 connected 2025/08/16 19:37:41 runner 1 connected 2025/08/16 19:37:42 runner 7 connected 2025/08/16 19:37:45 runner 3 connected 2025/08/16 19:37:50 runner 1 connected 2025/08/16 19:37:57 STAT { "buffer too small": 0, "candidate triage jobs": 79, "candidates": 65796, "comps overflows": 0, "corpus": 11706, "corpus [files]": 2208, "corpus [symbols]": 4688, "cover overflows": 6974, "coverage": 209037, "distributor delayed": 27646, "distributor undelayed": 27584, "distributor violated": 868, "exec candidate": 11930, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 52477, "exec total [new]": 53003, "exec triage": 36563, "executor restarts": 648, "fault jobs": 0, "fuzzer jobs": 79, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 213946, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: 
filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 11930, "no exec duration": 2194124000000, "no exec requests": 5512, "pending": 158, "prog exec time": 308, "reproducing": 0, "rpc recv": 6958428984, "rpc sent": 550848632, "signal": 206898, "smash jobs": 0, "triage jobs": 0, "vm output": 10021325, "vm restarts [base]": 21, "vm restarts [new]": 170 } 2025/08/16 19:38:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:38:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:38:14 runner 4 connected 2025/08/16 19:38:16 runner 2 connected 2025/08/16 19:38:17 runner 0 connected 2025/08/16 19:38:19 patched crashed: WARNING in xfrm_state_fini [need repro = true] 2025/08/16 19:38:19 scheduled a reproduction of 'WARNING in xfrm_state_fini' 2025/08/16 19:38:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:38:21 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:38:25 base crash: WARNING in xfrm_state_fini 2025/08/16 19:38:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:38:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:38:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:38:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:38:51 runner 8 connected 2025/08/16 19:38:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:38:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:39:06 runner 6 connected 2025/08/16 19:39:06 runner 1 connected 2025/08/16 19:39:16 runner 1 connected 2025/08/16 19:39:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:39:18 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:39:24 new: boot error: can't ssh into the instance 2025/08/16 19:39:26 runner 4 connected 2025/08/16 19:39:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:39:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:39:31 new: boot error: can't ssh into the instance 2025/08/16 19:39:33 runner 0 connected 2025/08/16 19:39:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:39:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:39:50 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 19:39:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:39:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:40:05 runner 2 connected 2025/08/16 19:40:07 runner 8 connected 2025/08/16 19:40:10 runner 6 connected 2025/08/16 19:40:17 runner 1 connected 2025/08/16 19:40:31 runner 3 connected 2025/08/16 19:40:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:40:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:40:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:40:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:40:45 runner 0 connected 2025/08/16 19:40:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need 
repro = true] 2025/08/16 19:40:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:15 runner 4 connected 2025/08/16 19:41:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:22 runner 8 connected 2025/08/16 19:41:40 runner 2 connected 2025/08/16 19:41:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:41:51 runner 1 connected 2025/08/16 19:41:52 runner 0 connected 2025/08/16 19:41:58 runner 6 connected 2025/08/16 19:41:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:41:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:42:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:42:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:23 runner 8 connected 2025/08/16 19:42:24 runner 4 connected 2025/08/16 19:42:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:42:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:41 runner 2 connected 2025/08/16 19:42:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:42:46 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:42:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:42:55 runner 1 connected 2025/08/16 19:42:57 STAT { "buffer too small": 0, "candidate triage jobs": 59, "candidates": 64873, "comps overflows": 0, "corpus": 12627, "corpus [files]": 2327, "corpus [symbols]": 4972, "cover overflows": 7439, "coverage": 212785, "distributor delayed": 29953, "distributor undelayed": 29951, "distributor violated": 868, "exec candidate": 12853, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 56934, "exec total [new]": 57333, "exec triage": 39411, "executor restarts": 728, "fault jobs": 0, "fuzzer jobs": 59, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 217651, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 12853, "no exec duration": 2398436000000, "no exec requests": 6029, "pending": 182, "prog exec 
time": 162, "reproducing": 0, "rpc recv": 7806860964, "rpc sent": 620415216, "signal": 210653, "smash jobs": 0, "triage jobs": 0, "vm output": 11023769, "vm restarts [base]": 24, "vm restarts [new]": 192 } 2025/08/16 19:42:59 runner 6 connected 2025/08/16 19:43:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:43:05 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:43:07 runner 0 connected 2025/08/16 19:43:27 runner 8 connected 2025/08/16 19:43:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:43:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:43:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:43:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:43:37 runner 4 connected 2025/08/16 19:43:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:43:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:43:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:43:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:44:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:44:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:44:12 runner 1 connected 2025/08/16 19:44:17 runner 6 connected 2025/08/16 19:44:24 runner 0 connected 2025/08/16 19:44:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:44:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:44:39 runner 8 connected 2025/08/16 19:44:46 runner 4 connected 2025/08/16 19:44:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:44:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:44:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:44:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:45:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:45:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:45:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:45:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:45:16 runner 1 connected 2025/08/16 19:45:38 runner 6 connected 2025/08/16 19:45:41 runner 0 connected 2025/08/16 19:45:50 runner 4 connected 2025/08/16 19:45:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:45:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:46:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:46:05 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:46:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:46:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:46:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:46:18 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:46:34 runner 1 connected 2025/08/16 19:46:46 runner 6 connected 2025/08/16 19:46:53 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:46:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:47:01 runner 4 connected 2025/08/16 19:47:09 patched crashed: kernel BUG in txUnlock [need repro = true] 2025/08/16 19:47:09 scheduled a reproduction of 'kernel BUG in txUnlock' 2025/08/16 19:47:09 base crash: kernel BUG in txUnlock 2025/08/16 19:47:11 new: boot error: can't ssh into the instance 2025/08/16 19:47:21 base: boot error: can't ssh into the instance 2025/08/16 19:47:21 new: boot error: can't ssh into the instance 2025/08/16 19:47:22 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/16 19:47:23 base crash: kernel BUG in txUnlock 2025/08/16 19:47:34 runner 1 connected 2025/08/16 19:47:51 runner 1 connected 2025/08/16 19:47:51 runner 6 connected 2025/08/16 19:47:52 runner 3 connected 2025/08/16 19:47:57 STAT { "buffer too small": 0, "candidate triage jobs": 52, "candidates": 64375, "comps overflows": 0, "corpus": 13122, "corpus [files]": 2411, "corpus [symbols]": 5143, "cover overflows": 7721, "coverage": 214781, "distributor delayed": 31224, "distributor undelayed": 31173, "distributor violated": 870, "exec candidate": 13351, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 59176, "exec total [new]": 59557, "exec triage": 40913, "executor restarts": 788, "fault jobs": 0, "fuzzer jobs": 52, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 220036, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 13351, "no exec duration": 2724020000000, "no exec requests": 6737, "pending": 199, "prog exec time": 339, "reproducing": 0, "rpc recv": 8470176660, "rpc sent": 671230264, "signal": 212672, "smash jobs": 0, "triage jobs": 0, "vm output": 11721434, "vm restarts [base]": 25, "vm restarts [new]": 211 } 2025/08/16 19:47:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:47:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:48:02 runner 5 connected 2025/08/16 19:48:03 runner 0 connected 2025/08/16 19:48:04 runner 4 connected 2025/08/16 19:48:05 runner 3 connected 2025/08/16 19:48:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:48:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:48:14 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/16 19:48:24 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/16 19:48:24 base crash: kernel BUG in txUnlock 2025/08/16 19:48:27 new: boot error: can't ssh into the instance 2025/08/16 19:48:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:48:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:48:39 runner 1 connected 2025/08/16 19:48:52 runner 6 connected 2025/08/16 19:48:55 runner 3 connected 2025/08/16 19:49:06 runner 0 connected 2025/08/16 19:49:07 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/16 19:49:08 runner 7 connected 2025/08/16 19:49:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:49:11 scheduled a reproduction of 'WARNING 
in folio_lruvec_lock_irqsave' 2025/08/16 19:49:20 runner 4 connected 2025/08/16 19:49:37 new: boot error: can't ssh into the instance 2025/08/16 19:49:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:49:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:49:55 runner 1 connected 2025/08/16 19:49:59 runner 6 connected 2025/08/16 19:50:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:50:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:50:18 runner 9 connected 2025/08/16 19:50:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:50:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:50:30 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:50:30 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:50:32 runner 3 connected 2025/08/16 19:50:34 base crash: possible deadlock in ocfs2_reserve_suballoc_bits 2025/08/16 19:50:35 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/16 19:50:43 runner 7 connected 2025/08/16 19:50:51 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/16 19:51:00 runner 4 connected 2025/08/16 19:51:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:51:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:51:12 runner 1 connected 2025/08/16 19:51:16 runner 3 connected 2025/08/16 19:51:16 runner 6 connected 2025/08/16 19:51:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:51:21 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:51:32 runner 3 connected 2025/08/16 19:51:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:51:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:51:48 runner 9 connected 2025/08/16 19:51:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:51:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:51:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:51:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:52:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:03 runner 7 connected 2025/08/16 19:52:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:52:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:52:22 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:25 runner 4 connected 2025/08/16 19:52:29 runner 6 connected 2025/08/16 19:52:42 runner 3 connected 2025/08/16 19:52:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:52:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:49 runner 9 connected 2025/08/16 19:52:50 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:52:50 scheduled a reproduction of 
'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:52:57 STAT { "buffer too small": 0, "candidate triage jobs": 43, "candidates": 63595, "comps overflows": 0, "corpus": 13904, "corpus [files]": 2539, "corpus [symbols]": 5394, "cover overflows": 8074, "coverage": 218725, "distributor delayed": 33079, "distributor undelayed": 33050, "distributor violated": 872, "exec candidate": 14131, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 62635, "exec total [new]": 63070, "exec triage": 43274, "executor restarts": 852, "fault jobs": 0, "fuzzer jobs": 43, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 223682, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 14131, "no exec duration": 3357330000000, "no exec requests": 8281, "pending": 217, "prog exec time": 51, "reproducing": 0, "rpc recv": 9374111188, "rpc sent": 742043392, "signal": 216619, "smash jobs": 0, "triage jobs": 0, "vm output": 12756030, "vm restarts [base]": 29, "vm restarts [new]": 233 } 2025/08/16 19:53:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:53:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:53:04 runner 7 connected 2025/08/16 19:53:10 new: boot error: can't ssh into the instance 2025/08/16 19:53:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:53:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:53:25 runner 4 connected 2025/08/16 19:53:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:53:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:53:32 runner 6 connected 2025/08/16 19:53:42 runner 3 connected 2025/08/16 19:53:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:53:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:53:52 runner 2 connected 2025/08/16 19:53:57 runner 9 connected 2025/08/16 19:54:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:54:01 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:54:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:54:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:54:09 runner 7 connected 2025/08/16 19:54:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:54:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:54:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:54:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:54:26 runner 4 connected 2025/08/16 19:54:42 runner 3 connected 2025/08/16 19:54:48 runner 6 connected 2025/08/16 19:54:58 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:54:59 runner 2 connected 2025/08/16 19:54:59 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:55:02 runner 9 connected 2025/08/16 19:55:16 new: boot error: can't ssh into the instance 2025/08/16 19:55:17 
patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:55:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:55:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:55:30 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/16 19:55:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:55:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:55:47 runner 0 connected 2025/08/16 19:55:48 runner 1 connected 2025/08/16 19:55:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:55:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:56:04 runner 8 connected 2025/08/16 19:56:05 runner 7 connected 2025/08/16 19:56:09 runner 6 connected 2025/08/16 19:56:11 new: boot error: can't ssh into the instance 2025/08/16 19:56:17 runner 2 connected 2025/08/16 19:56:19 runner 2 connected 2025/08/16 19:56:20 runner 4 connected 2025/08/16 19:56:25 runner 3 connected 2025/08/16 19:56:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:56:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:56:45 runner 9 connected 2025/08/16 19:56:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:56:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:56:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:56:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:56:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:56:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:57:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:01 runner 0 connected 2025/08/16 19:57:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:57:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:57:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:21 runner 6 connected 2025/08/16 19:57:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:57:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:32 runner 2 connected 2025/08/16 19:57:38 runner 3 connected 2025/08/16 19:57:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:57:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:57:40 runner 7 connected 2025/08/16 19:57:41 runner 4 connected 2025/08/16 19:57:43 runner 8 connected 2025/08/16 19:57:52 runner 9 connected 
2025/08/16 19:57:57 STAT { "buffer too small": 0, "candidate triage jobs": 28, "candidates": 62573, "comps overflows": 0, "corpus": 14928, "corpus [files]": 2678, "corpus [symbols]": 5702, "cover overflows": 8697, "coverage": 222314, "distributor delayed": 35588, "distributor undelayed": 35584, "distributor violated": 910, "exec candidate": 15153, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 67161, "exec total [new]": 67692, "exec triage": 46370, "executor restarts": 941, "fault jobs": 0, "fuzzer jobs": 28, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 5, "hints jobs": 0, "max signal": 227476, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 15153, "no exec duration": 3825795000000, "no exec requests": 9322, "pending": 240, "prog exec time": 260, "reproducing": 0, "rpc recv": 10362739452, "rpc sent": 820583368, "signal": 220215, "smash jobs": 0, "triage jobs": 0, "vm output": 14005573, "vm restarts [base]": 32, "vm restarts [new]": 260 } 2025/08/16 19:58:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:11 runner 0 connected 2025/08/16 19:58:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:22 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:28 runner 6 connected 2025/08/16 19:58:30 new: boot error: can't ssh into the instance 2025/08/16 19:58:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:45 base crash: WARNING in xfrm_state_fini 2025/08/16 19:58:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:58:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:58:59 runner 4 connected 2025/08/16 19:59:11 runner 8 connected 2025/08/16 19:59:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:59:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:59:16 runner 3 connected 2025/08/16 19:59:18 runner 5 connected 2025/08/16 19:59:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:59:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:59:25 runner 7 connected 2025/08/16 19:59:26 runner 0 connected 2025/08/16 19:59:30 runner 2 connected 2025/08/16 19:59:34 runner 9 connected 2025/08/16 19:59:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:59:37 scheduled a 
reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:59:38 runner 0 connected 2025/08/16 19:59:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:59:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 19:59:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 19:59:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:00 runner 6 connected 2025/08/16 20:00:01 runner 4 connected 2025/08/16 20:00:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:35 runner 8 connected 2025/08/16 20:00:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:43 runner 5 connected 2025/08/16 20:00:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:00:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:00:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:09 runner 0 connected 2025/08/16 20:01:10 runner 7 connected 2025/08/16 20:01:20 runner 9 connected 2025/08/16 20:01:21 runner 6 connected 2025/08/16 20:01:26 runner 4 connected 2025/08/16 20:01:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:35 runner 8 connected 2025/08/16 20:01:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:50 runner 5 connected 2025/08/16 20:01:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:01:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:01:57 new: boot error: can't ssh into the instance 2025/08/16 20:02:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:10 runner 0 connected 2025/08/16 20:02:13 runner 7 
connected 2025/08/16 20:02:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:25 runner 6 connected 2025/08/16 20:02:31 runner 4 connected 2025/08/16 20:02:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:34 runner 9 connected 2025/08/16 20:02:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:47 runner 8 connected 2025/08/16 20:02:50 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:50 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:02:57 STAT { "buffer too small": 0, "candidate triage jobs": 45, "candidates": 61416, "comps overflows": 0, "corpus": 16045, "corpus [files]": 2807, "corpus [symbols]": 6009, "cover overflows": 9248, "coverage": 226676, "distributor delayed": 38202, "distributor undelayed": 38187, "distributor violated": 911, "exec candidate": 16310, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 72329, "exec total [new]": 72888, "exec triage": 49806, "executor restarts": 1040, "fault jobs": 0, "fuzzer jobs": 45, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 231847, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 16310, "no exec duration": 4261840000000, "no exec requests": 10419, "pending": 271, "prog exec time": 157, "reproducing": 0, "rpc recv": 11356760236, "rpc sent": 907117136, "signal": 224587, "smash jobs": 0, "triage jobs": 0, "vm output": 15360396, "vm restarts [base]": 33, "vm restarts [new]": 287 } 2025/08/16 20:02:58 runner 5 connected 2025/08/16 20:02:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:02:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:03:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:03:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:03:14 runner 7 connected 2025/08/16 20:03:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:03:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:03:26 runner 6 connected 2025/08/16 20:03:31 runner 4 connected 2025/08/16 20:03:39 runner 9 connected 2025/08/16 20:03:52 runner 8 connected 2025/08/16 20:04:11 runner 5 connected 2025/08/16 20:04:31 base crash: lost connection to test machine 2025/08/16 20:04:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:04:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:03 scheduled a reproduction of 
'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:21 runner 0 connected 2025/08/16 20:05:30 runner 7 connected 2025/08/16 20:05:52 runner 4 connected 2025/08/16 20:05:56 runner 5 connected 2025/08/16 20:05:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:05:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:05:58 runner 8 connected 2025/08/16 20:06:03 runner 9 connected 2025/08/16 20:06:05 runner 6 connected 2025/08/16 20:06:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:06:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:06:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:06:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:06:39 runner 7 connected 2025/08/16 20:06:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:06:52 runner 4 connected 2025/08/16 20:06:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:06:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:07:00 runner 8 connected 2025/08/16 20:07:04 runner 5 connected 2025/08/16 20:07:16 runner 9 connected 2025/08/16 20:07:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:07:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:07:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:07:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:07:24 runner 6 connected 2025/08/16 20:07:39 runner 7 connected 2025/08/16 20:07:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:07:39 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:07:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:07:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:07:57 STAT { "buffer too small": 0, "candidate triage jobs": 140, "candidates": 59883, "comps overflows": 0, "corpus": 17467, "corpus [files]": 2965, "corpus [symbols]": 6369, "cover overflows": 10032, "coverage": 231339, "distributor 
delayed": 41290, "distributor undelayed": 41217, "distributor violated": 961, "exec candidate": 17843, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 79440, "exec total [new]": 79986, "exec triage": 54305, "executor restarts": 1115, "fault jobs": 0, "fuzzer jobs": 140, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 236912, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 17843, "no exec duration": 4705963000000, "no exec requests": 11627, "pending": 291, "prog exec time": 214, "reproducing": 0, "rpc recv": 12119986740, "rpc sent": 985152360, "signal": 229203, "smash jobs": 0, "triage jobs": 0, "vm output": 16665056, "vm restarts [base]": 34, "vm restarts [new]": 307 } 2025/08/16 20:08:00 runner 8 connected 2025/08/16 20:08:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:08:06 runner 4 connected 2025/08/16 20:08:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:08:20 runner 5 connected 2025/08/16 20:08:32 runner 9 connected 2025/08/16 20:08:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:08:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:38 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:08:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:08:49 runner 6 connected 2025/08/16 20:08:56 runner 7 connected 2025/08/16 20:08:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:08:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:09:20 runner 8 connected 2025/08/16 20:09:25 runner 4 connected 2025/08/16 20:09:29 runner 5 connected 2025/08/16 20:09:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:09:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:09:38 runner 9 connected 2025/08/16 20:09:42 new: boot error: can't ssh into the instance 2025/08/16 20:09:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:09:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:10:12 new: boot error: can't ssh into the instance 2025/08/16 20:10:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:10:15 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:10:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:10:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:10:20 runner 6 connected 2025/08/16 20:10:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:10:26 scheduled a reproduction of 'WARNING 
in folio_lruvec_lock_irqsave' 2025/08/16 20:10:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:10:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:10:32 runner 7 connected 2025/08/16 20:10:50 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:10:50 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:11:00 runner 4 connected 2025/08/16 20:11:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:11:05 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:11:07 runner 9 connected 2025/08/16 20:11:11 runner 8 connected 2025/08/16 20:11:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:11:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:11:31 runner 6 connected 2025/08/16 20:11:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:11:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:11:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:11:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:11:46 runner 7 connected 2025/08/16 20:11:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:11:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:12:03 new: boot error: can't ssh into the instance 2025/08/16 20:12:04 runner 4 connected 2025/08/16 20:12:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:12:05 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:12:14 runner 9 connected 2025/08/16 20:12:18 runner 8 connected 2025/08/16 20:12:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:12:29 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:12:34 runner 6 connected 2025/08/16 20:12:40 new: boot error: can't ssh into the instance 2025/08/16 20:12:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:12:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:12:44 runner 1 connected 2025/08/16 20:12:46 runner 7 connected 2025/08/16 20:12:57 STAT { "buffer too small": 0, "candidate triage jobs": 248, "candidates": 58797, "comps overflows": 0, "corpus": 18439, "corpus [files]": 3095, "corpus [symbols]": 6649, "cover overflows": 10549, "coverage": 234752, "distributor delayed": 43815, "distributor undelayed": 43671, "distributor violated": 962, "exec candidate": 18929, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 84352, "exec total [new]": 84892, "exec triage": 57359, "executor restarts": 1187, "fault jobs": 0, "fuzzer jobs": 248, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 240532, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 18929, "no exec duration": 5369573000000, "no exec requests": 13371, "pending": 312, "prog exec time": 188, "reproducing": 0, "rpc 
recv": 12912498552, "rpc sent": 1066534512, "signal": 232627, "smash jobs": 0, "triage jobs": 0, "vm output": 17725643, "vm restarts [base]": 34, "vm restarts [new]": 330 } 2025/08/16 20:13:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:19 runner 4 connected 2025/08/16 20:13:21 runner 0 connected 2025/08/16 20:13:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:22 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:30 runner 8 connected 2025/08/16 20:13:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:43 runner 9 connected 2025/08/16 20:13:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:13:55 runner 6 connected 2025/08/16 20:13:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:13:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:14:03 runner 1 connected 2025/08/16 20:14:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:14:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:14:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:14:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:14:21 runner 4 connected 2025/08/16 20:14:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:14:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:14:37 runner 8 connected 2025/08/16 20:14:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:14:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:14:49 runner 9 connected 2025/08/16 20:14:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:14:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:15:01 runner 6 connected 2025/08/16 20:15:10 runner 1 connected 2025/08/16 20:15:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:15:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:15:24 runner 4 connected 2025/08/16 20:15:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:15:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:15:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:15:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:15:37 runner 8 connected 2025/08/16 20:15:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:15:51 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 20:15:57 runner 9 connected 2025/08/16 20:16:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:16:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:16:08 runner 6 connected 2025/08/16 20:16:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:16:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:16:32 runner 4 connected 2025/08/16 20:16:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:16:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:16:45 runner 8 connected 2025/08/16 20:16:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:16:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:17:05 runner 9 connected 2025/08/16 20:17:17 runner 6 connected 2025/08/16 20:17:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:17:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:17:40 runner 4 connected 2025/08/16 20:17:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:17:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:17:57 STAT { "buffer too small": 0, "candidate triage jobs": 150, "candidates": 58320, "comps overflows": 0, "corpus": 18998, "corpus [files]": 3182, "corpus [symbols]": 6817, "cover overflows": 10845, "coverage": 236605, "distributor delayed": 45200, "distributor undelayed": 45101, "distributor violated": 962, "exec candidate": 19406, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 13, "exec seeds": 0, "exec smash": 0, "exec total [base]": 86734, "exec total [new]": 87334, "exec triage": 59044, "executor restarts": 1257, "fault jobs": 0, "fuzzer jobs": 150, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 242129, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 19406, "no exec duration": 5969209000000, "no exec requests": 14557, "pending": 334, "prog exec time": 304, "reproducing": 0, "rpc recv": 13579877148, "rpc sent": 1127574304, "signal": 234477, "smash jobs": 0, "triage jobs": 0, "vm output": 18578001, "vm restarts [base]": 34, "vm restarts [new]": 350 } 2025/08/16 20:18:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:18:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:18:14 runner 8 connected 2025/08/16 20:18:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:18:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:18:29 runner 9 connected 2025/08/16 20:18:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:18:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:18:41 runner 6 connected 2025/08/16 20:18:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:18:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:19:01 runner 4 connected 
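Note on the periodic STAT records: each "STAT { ... }" entry in this log is a single JSON object emitted once per reporting interval, and its numeric fields (for example "corpus", "coverage", "exec total [base]", "exec total [new]", "pending") are what show the base and patched kernels diverging over the run. Below is a minimal sketch of how those objects could be pulled out of a saved copy of this log and tabulated; it is not part of syzkaller, and the program name statscan.go and the command-line log path are illustrative assumptions.

package main

// statscan.go: extract the periodic "STAT { ... }" JSON objects from a saved
// syzkaller diff-fuzzing log and print a few counters per interval.
// Sketch only: the log format is assumed from the excerpts in this document.

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: statscan <saved log file>")
		os.Exit(1)
	}
	raw, err := os.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	text := string(raw)
	for {
		i := strings.Index(text, "STAT {")
		if i < 0 {
			break
		}
		// Decode exactly one JSON object, starting at the '{' after "STAT ".
		dec := json.NewDecoder(strings.NewReader(text[i+len("STAT "):]))
		var stat map[string]float64 // every STAT field in this log is numeric
		if err := dec.Decode(&stat); err != nil {
			break
		}
		fmt.Printf("corpus=%.0f coverage=%.0f exec[base]=%.0f exec[new]=%.0f pending=%.0f\n",
			stat["corpus"], stat["coverage"],
			stat["exec total [base]"], stat["exec total [new]"], stat["pending"])
		text = text[i+len("STAT {"):] // continue searching after this record
	}
}

Run as, for example, "go run statscan.go <saved log>". Each output line then corresponds to one STAT record, which makes trends in the raw log easier to see, such as the "pending" reproduction count climbing steadily while "fuzzing VMs [new]" stays at 0-3 as the patched kernel keeps crashing.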
2025/08/16 20:19:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:19:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:19:21 runner 8 connected 2025/08/16 20:19:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:19:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:19:39 runner 9 connected 2025/08/16 20:19:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:19:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:19:48 new: boot error: can't ssh into the instance 2025/08/16 20:19:48 runner 6 connected 2025/08/16 20:19:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:19:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:20:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:20:07 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:20:17 new: boot error: can't ssh into the instance 2025/08/16 20:20:21 new: boot error: can't ssh into the instance 2025/08/16 20:20:22 runner 8 connected 2025/08/16 20:20:29 runner 3 connected 2025/08/16 20:20:39 runner 9 connected 2025/08/16 20:20:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:20:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:20:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:20:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:20:58 runner 2 connected 2025/08/16 20:21:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:21:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:21:02 runner 5 connected 2025/08/16 20:21:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:21:18 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:21:29 runner 3 connected 2025/08/16 20:21:41 runner 9 connected 2025/08/16 20:21:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:21:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:21:59 runner 2 connected 2025/08/16 20:22:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:22:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:22:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:22:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:22:30 runner 5 connected 2025/08/16 20:22:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:22:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:22:45 runner 9 connected 2025/08/16 20:22:54 runner 3 connected 2025/08/16 20:22:57 STAT { "buffer too small": 0, "candidate triage jobs": 261, "candidates": 57899, "comps overflows": 0, "corpus": 19305, "corpus [files]": 3232, "corpus [symbols]": 6911, "cover overflows": 11006, "coverage": 237602, "distributor delayed": 46343, "distributor undelayed": 46135, "distributor violated": 965, "exec candidate": 19827, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, 
"exec retries": 13, "exec seeds": 0, "exec smash": 0, "exec total [base]": 88666, "exec total [new]": 89190, "exec triage": 60205, "executor restarts": 1307, "fault jobs": 0, "fuzzer jobs": 261, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 243490, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 19827, "no exec duration": 6408349000000, "no exec requests": 15479, "pending": 351, "prog exec time": 221, "reproducing": 0, "rpc recv": 14139767152, "rpc sent": 1182128000, "signal": 235470, "smash jobs": 0, "triage jobs": 0, "vm output": 19170494, "vm restarts [base]": 34, "vm restarts [new]": 368 } 2025/08/16 20:22:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:22:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:23:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:23:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:23:23 runner 2 connected 2025/08/16 20:23:30 new: boot error: can't ssh into the instance 2025/08/16 20:23:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:23:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:23:39 runner 5 connected 2025/08/16 20:23:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:23:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:23:53 new: boot error: can't ssh into the instance 2025/08/16 20:23:54 runner 3 connected 2025/08/16 20:24:12 runner 7 connected 2025/08/16 20:24:17 runner 9 connected 2025/08/16 20:24:26 base crash: KASAN: slab-use-after-free Read in __xfrm_state_lookup 2025/08/16 20:24:33 runner 2 connected 2025/08/16 20:24:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:24:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:24:46 patched crashed: lost connection to test machine [need repro = false] 2025/08/16 20:25:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:25:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:25:08 runner 0 connected 2025/08/16 20:25:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:25:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:25:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:25:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:25:22 runner 3 connected 2025/08/16 20:25:39 new: boot error: can't ssh into the instance 2025/08/16 20:25:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:25:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:25:47 runner 2 connected 2025/08/16 20:25:50 runner 9 connected 2025/08/16 20:25:53 runner 7 connected 2025/08/16 20:26:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:26:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:26:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:26:09 
scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:26:21 runner 1 connected 2025/08/16 20:26:23 runner 3 connected 2025/08/16 20:26:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:26:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:26:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:26:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:26:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:26:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:26:49 runner 2 connected 2025/08/16 20:26:51 runner 9 connected 2025/08/16 20:27:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:27:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:27:16 runner 7 connected 2025/08/16 20:27:21 runner 1 connected 2025/08/16 20:27:30 runner 3 connected 2025/08/16 20:27:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:27:39 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:27:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:27:51 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:27:57 timed out waiting for corpus triage 2025/08/16 20:27:57 starting bug reproductions 2025/08/16 20:27:57 starting bug reproductions (max 10 VMs, 7 repros) 2025/08/16 20:27:57 STAT { "buffer too small": 0, "candidate triage jobs": 82, "candidates": 57107, "comps overflows": 0, "corpus": 20261, "corpus [files]": 3368, "corpus [symbols]": 7212, "cover overflows": 11512, "coverage": 240719, "distributor delayed": 48467, "distributor undelayed": 48398, "distributor violated": 1120, "exec candidate": 20619, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 13, "exec seeds": 0, "exec smash": 0, "exec total [base]": 92539, "exec total [new]": 93242, "exec triage": 62801, "executor restarts": 1363, "fault jobs": 0, "fuzzer jobs": 82, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 245987, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 20619, "no exec duration": 6987455000000, "no exec requests": 16853, "pending": 368, "prog exec time": 247, "reproducing": 0, "rpc recv": 14804722276, "rpc sent": 1252484736, "signal": 238582, "smash jobs": 0, "triage jobs": 0, "vm output": 20161583, "vm restarts [base]": 35, "vm restarts [new]": 385 } 2025/08/16 20:27:57 reproduction of "WARNING in xfrm_state_fini" aborted: it's no longer needed 2025/08/16 20:27:57 reproduction of "possible deadlock in ocfs2_try_remove_refcount_tree" aborted: it's no longer needed 2025/08/16 20:27:57 reproduction of "WARNING in xfrm_state_fini" aborted: it's no longer needed 2025/08/16 20:27:57 reproduction of "kernel BUG in txUnlock" aborted: it's no longer needed 2025/08/16 20:27:57 start reproducing 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:27:57 runner 9 connected 2025/08/16 20:28:32 runner 7 connected 2025/08/16 20:28:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 
20:28:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:28:38 runner 1 connected 2025/08/16 20:28:39 runner 3 connected 2025/08/16 20:28:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:28:56 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:29:01 base crash: INFO: trying to register non-static key in ocfs2_dlm_shutdown 2025/08/16 20:29:01 runner 2 connected 2025/08/16 20:29:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:29:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:29:15 runner 9 connected 2025/08/16 20:29:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:29:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:29:29 new: boot error: can't ssh into the instance 2025/08/16 20:29:37 runner 7 connected 2025/08/16 20:29:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:29:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:29:46 runner 1 connected 2025/08/16 20:29:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:29:52 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:29:57 runner 3 connected 2025/08/16 20:30:12 new: boot error: can't ssh into the instance 2025/08/16 20:30:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:30:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:30:23 runner 2 connected 2025/08/16 20:30:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:30:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:30:30 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:30:30 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:30:33 runner 9 connected 2025/08/16 20:30:49 new: boot error: can't ssh into the instance 2025/08/16 20:30:54 runner 6 connected 2025/08/16 20:30:54 runner 7 connected 2025/08/16 20:30:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:30:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:06 runner 1 connected 2025/08/16 20:31:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:12 runner 3 connected 2025/08/16 20:31:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:28 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:30 runner 8 connected 2025/08/16 20:31:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:35 runner 9 connected 2025/08/16 20:31:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:49 runner 2 connected 2025/08/16 20:31:55 runner 7 
connected 2025/08/16 20:31:58 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:58 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:31:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:31:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:32:14 runner 6 connected 2025/08/16 20:32:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:32:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:32:22 runner 3 connected 2025/08/16 20:32:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:32:26 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:32:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:32:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:32:40 runner 9 connected 2025/08/16 20:32:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:32:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:32:56 runner 2 connected 2025/08/16 20:32:57 STAT { "buffer too small": 0, "candidate triage jobs": 16, "candidates": 56511, "comps overflows": 0, "corpus": 20917, "corpus [files]": 3464, "corpus [symbols]": 7405, "cover overflows": 11854, "coverage": 243285, "distributor delayed": 50165, "distributor undelayed": 50150, "distributor violated": 1120, "exec candidate": 21215, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 13, "exec seeds": 0, "exec smash": 0, "exec total [base]": 95341, "exec total [new]": 96148, "exec triage": 64703, "executor restarts": 1428, "fault jobs": 0, "fuzzer jobs": 16, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 248439, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 21215, "no exec duration": 7352933000000, "no exec requests": 17677, "pending": 384, "prog exec time": 163, "reproducing": 1, "rpc recv": 15542702036, "rpc sent": 1316782008, "signal": 241091, "smash jobs": 0, "triage jobs": 0, "vm output": 21178219, "vm restarts [base]": 35, "vm restarts [new]": 408 } 2025/08/16 20:33:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:33:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:33:07 runner 7 connected 2025/08/16 20:33:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:33:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:33:18 runner 6 connected 2025/08/16 20:33:26 runner 3 connected 2025/08/16 20:33:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:33:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:33:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:33:37 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:33:42 runner 9 connected 2025/08/16 20:33:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:33:45 scheduled a reproduction 
of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:33:57 runner 2 connected 2025/08/16 20:33:59 new: boot error: can't ssh into the instance 2025/08/16 20:34:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:34:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:34:08 runner 7 connected 2025/08/16 20:34:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:34:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:34:18 runner 6 connected 2025/08/16 20:34:26 runner 3 connected 2025/08/16 20:34:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:34:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:34:40 runner 0 connected 2025/08/16 20:34:44 runner 9 connected 2025/08/16 20:34:52 new: boot error: can't ssh into the instance 2025/08/16 20:34:57 runner 2 connected 2025/08/16 20:35:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:02 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:10 runner 7 connected 2025/08/16 20:35:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:35:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:35:47 runner 3 connected 2025/08/16 20:35:50 runner 6 connected 2025/08/16 20:35:54 runner 0 connected 2025/08/16 20:35:59 runner 2 connected 2025/08/16 20:36:15 runner 9 connected 2025/08/16 20:36:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:21 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:36:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:36:32 runner 7 connected 2025/08/16 20:36:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:36:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:36:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:36:50 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:36:54 
patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:36:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:37:02 runner 3 connected 2025/08/16 20:37:13 runner 2 connected 2025/08/16 20:37:15 runner 0 connected 2025/08/16 20:37:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:37:22 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:37:22 runner 9 connected 2025/08/16 20:37:30 runner 6 connected 2025/08/16 20:37:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:37:37 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:37:42 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:37:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:37:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:37:56 base crash: INFO: task hung in v9fs_evict_inode 2025/08/16 20:37:57 STAT { "buffer too small": 0, "candidate triage jobs": 193, "candidates": 55803, "comps overflows": 0, "corpus": 21433, "corpus [files]": 3528, "corpus [symbols]": 7534, "cover overflows": 12216, "coverage": 244839, "distributor delayed": 51877, "distributor undelayed": 51701, "distributor violated": 1120, "exec candidate": 21923, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 16, "exec seeds": 0, "exec smash": 0, "exec total [base]": 98337, "exec total [new]": 99170, "exec triage": 66540, "executor restarts": 1505, "fault jobs": 0, "fuzzer jobs": 193, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 250531, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 21923, "no exec duration": 7594560000000, "no exec requests": 18167, "pending": 407, "prog exec time": 266, "reproducing": 1, "rpc recv": 16336667984, "rpc sent": 1384790592, "signal": 242625, "smash jobs": 0, "triage jobs": 0, "vm output": 22539350, "vm restarts [base]": 35, "vm restarts [new]": 431 } 2025/08/16 20:37:57 base crash: INFO: task hung in v9fs_evict_inode 2025/08/16 20:38:03 runner 3 connected 2025/08/16 20:38:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:38:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:38:18 runner 0 connected 2025/08/16 20:38:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:38:23 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:38:30 base crash: INFO: task hung in v9fs_evict_inode 2025/08/16 20:38:37 runner 2 connected 2025/08/16 20:38:37 runner 3 connected 2025/08/16 20:38:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:38:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:38:59 runner 9 connected 2025/08/16 20:39:06 base: boot error: can't ssh into the instance 2025/08/16 20:39:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need 
repro = true] 2025/08/16 20:39:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:39:11 runner 6 connected 2025/08/16 20:39:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:39:16 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:39:18 runner 2 connected 2025/08/16 20:39:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:39:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:39:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:39:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:39:43 runner 3 connected 2025/08/16 20:39:55 runner 1 connected 2025/08/16 20:40:05 runner 2 connected 2025/08/16 20:40:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:40:11 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:40:14 runner 9 connected 2025/08/16 20:40:20 runner 6 connected 2025/08/16 20:40:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:40:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:40:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:40:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:40:59 runner 3 connected 2025/08/16 20:41:25 runner 2 connected 2025/08/16 20:41:33 new: boot error: can't ssh into the instance 2025/08/16 20:41:36 runner 9 connected 2025/08/16 20:41:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:41:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:42:05 new: boot error: can't ssh into the instance 2025/08/16 20:42:15 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/16 20:42:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:42:21 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:42:23 runner 1 connected 2025/08/16 20:42:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:42:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:42:53 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:42:53 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:42:54 runner 8 connected 2025/08/16 20:42:56 runner 2 connected 2025/08/16 20:42:57 STAT { "buffer too small": 0, "candidate triage jobs": 160, "candidates": 54657, "comps overflows": 0, "corpus": 22605, "corpus [files]": 3655, "corpus [symbols]": 7840, "cover overflows": 12928, "coverage": 248464, "distributor delayed": 54719, "distributor undelayed": 54605, "distributor violated": 1336, "exec candidate": 23069, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 16, "exec seeds": 0, "exec smash": 0, "exec total [base]": 104385, "exec total [new]": 105166, "exec triage": 70139, "executor restarts": 1547, "fault jobs": 0, "fuzzer jobs": 160, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 254109, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules 
[base]": 1, "modules [new]": 1, "new inputs": 23069, "no exec duration": 7888646000000, "no exec requests": 19036, "pending": 421, "prog exec time": 188, "reproducing": 1, "rpc recv": 16928293916, "rpc sent": 1446115528, "signal": 246166, "smash jobs": 0, "triage jobs": 0, "vm output": 24393242, "vm restarts [base]": 39, "vm restarts [new]": 445 } 2025/08/16 20:43:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:43:08 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:43:09 runner 9 connected 2025/08/16 20:43:22 runner 2 connected 2025/08/16 20:43:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:43:25 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:43:35 runner 6 connected 2025/08/16 20:43:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:43:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:43:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:43:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:43:50 runner 1 connected 2025/08/16 20:44:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:44:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:44:07 runner 8 connected 2025/08/16 20:44:16 runner 9 connected 2025/08/16 20:44:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:44:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:44:28 runner 2 connected 2025/08/16 20:44:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:44:39 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:44:40 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:44:48 runner 6 connected 2025/08/16 20:44:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:44:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:44:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:44:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:44:59 runner 1 connected 2025/08/16 20:45:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:45:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:45:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:45:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:45:20 runner 8 connected 2025/08/16 20:45:31 runner 9 connected 2025/08/16 20:45:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:45:39 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:45:51 runner 6 connected 2025/08/16 20:46:02 runner 1 connected 2025/08/16 20:46:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:46:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:46:14 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:46:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:46:20 runner 8 connected 2025/08/16 20:46:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:46:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:46:54 runner 9 connected 2025/08/16 20:46:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:46:55 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:46:56 new: boot error: can't ssh into the instance 2025/08/16 20:46:56 runner 6 connected 2025/08/16 20:47:00 new: boot error: can't ssh into the instance 2025/08/16 20:47:16 runner 1 connected 2025/08/16 20:47:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:47:26 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:47:36 runner 8 connected 2025/08/16 20:47:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:47:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:47:41 runner 7 connected 2025/08/16 20:47:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:47:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:47:57 STAT { "buffer too small": 0, "candidate triage jobs": 34, "candidates": 54105, "comps overflows": 0, "corpus": 23274, "corpus [files]": 3733, "corpus [symbols]": 8020, "cover overflows": 13252, "coverage": 250407, "distributor delayed": 56402, "distributor undelayed": 56371, "distributor violated": 1336, "exec candidate": 23621, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 16, "exec seeds": 0, "exec smash": 0, "exec total [base]": 107101, "exec total [new]": 107999, "exec triage": 71990, "executor restarts": 1604, "fault jobs": 0, "fuzzer jobs": 34, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 255825, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 23621, "no exec duration": 8319607000000, "no exec requests": 20053, "pending": 440, "prog exec time": 211, "reproducing": 1, "rpc recv": 17629363780, "rpc sent": 1506616784, "signal": 248021, "smash jobs": 0, "triage jobs": 0, "vm output": 25536872, "vm restarts [base]": 39, "vm restarts [new]": 464 } 2025/08/16 20:48:02 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 20:48:03 base: boot error: can't ssh into the instance 2025/08/16 20:48:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:48:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:48:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:48:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:48:17 runner 6 connected 2025/08/16 20:48:26 runner 1 connected 2025/08/16 20:48:36 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:48:36 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:48:43 runner 3 connected 2025/08/16 20:48:43 runner 0 connected 2025/08/16 20:48:44 
runner 8 connected 2025/08/16 20:48:47 runner 7 connected 2025/08/16 20:48:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:48:48 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:49:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:49:12 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:49:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:49:13 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:49:13 new: boot error: can't ssh into the instance 2025/08/16 20:49:17 runner 6 connected 2025/08/16 20:49:29 runner 1 connected 2025/08/16 20:49:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:49:43 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:49:53 runner 7 connected 2025/08/16 20:49:54 runner 8 connected 2025/08/16 20:49:54 runner 0 connected 2025/08/16 20:50:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:50:19 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:50:25 runner 6 connected 2025/08/16 20:50:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:50:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:50:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:50:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:50:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:50:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:50:52 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:51:00 runner 1 connected 2025/08/16 20:51:12 runner 0 connected 2025/08/16 20:51:21 runner 8 connected 2025/08/16 20:51:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:51:27 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:51:28 runner 7 connected 2025/08/16 20:51:33 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:51:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:51:40 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:51:54 new: boot error: can't ssh into the instance 2025/08/16 20:51:59 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 20:52:16 runner 6 connected 2025/08/16 20:52:29 runner 1 connected 2025/08/16 20:52:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:52:31 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:52:42 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:52:42 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:52:42 runner 3 connected 2025/08/16 20:52:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:52:44 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:52:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:52:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:52:57 STAT { "buffer too small": 0, "candidate triage jobs": 91, "candidates": 53042, "comps overflows": 0, "corpus": 24273, "corpus [files]": 3866, "corpus [symbols]": 8304, "cover overflows": 13872, "coverage": 252839, "distributor delayed": 58892, "distributor undelayed": 58874, "distributor violated": 1451, "exec candidate": 24684, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 20, "exec seeds": 0, "exec smash": 0, "exec total [base]": 112256, "exec total [new]": 113197, "exec triage": 75156, "executor restarts": 1658, "fault jobs": 0, "fuzzer jobs": 91, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 258415, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 2, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 24684, "no exec duration": 9054576000000, "no exec requests": 22083, "pending": 457, "prog exec time": 202, "reproducing": 1, "rpc recv": 18302569984, "rpc sent": 1579743824, "signal": 250483, "smash jobs": 0, "triage jobs": 0, "vm output": 26774226, "vm restarts [base]": 41, "vm restarts [new]": 481 } 2025/08/16 20:53:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:53:00 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:53:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:53:10 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:53:13 runner 7 connected 2025/08/16 20:53:23 runner 8 connected 2025/08/16 20:53:25 runner 0 connected 2025/08/16 20:53:35 runner 6 connected 2025/08/16 20:53:41 runner 1 connected 2025/08/16 20:53:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:53:49 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:53:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:53:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:53:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:53:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:54:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:54:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:54:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:54:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:54:30 runner 7 connected 2025/08/16 20:54:38 runner 8 connected 2025/08/16 20:54:45 runner 6 connected 2025/08/16 20:54:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:54:49 scheduled a reproduction of 'WARNING in 
folio_lruvec_lock_irqsave' 2025/08/16 20:54:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:54:57 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:54:59 new: boot error: can't ssh into the instance 2025/08/16 20:55:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:55:04 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:55:30 runner 7 connected 2025/08/16 20:55:39 runner 8 connected 2025/08/16 20:55:40 runner 2 connected 2025/08/16 20:56:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:56:03 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:56:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:56:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:56:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:56:35 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:56:44 runner 7 connected 2025/08/16 20:56:55 runner 2 connected 2025/08/16 20:57:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:57:14 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:57:27 base crash: INFO: task hung in read_part_sector 2025/08/16 20:57:31 new: boot error: can't ssh into the instance 2025/08/16 20:57:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:57:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:57:55 runner 2 connected 2025/08/16 20:57:57 STAT { "buffer too small": 0, "candidate triage jobs": 196, "candidates": 52545, "comps overflows": 0, "corpus": 24658, "corpus [files]": 3920, "corpus [symbols]": 8416, "cover overflows": 14204, "coverage": 253824, "distributor delayed": 60103, "distributor undelayed": 59911, "distributor violated": 1510, "exec candidate": 25181, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 20, "exec seeds": 0, "exec smash": 0, "exec total [base]": 114550, "exec total [new]": 115359, "exec triage": 76377, "executor restarts": 1699, "fault jobs": 0, "fuzzer jobs": 196, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 259909, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 2, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 25181, "no exec duration": 9588510000000, "no exec requests": 23378, "pending": 472, "prog exec time": 0, "reproducing": 1, "rpc recv": 18739161260, "rpc sent": 1618939080, "signal": 251457, "smash jobs": 0, "triage jobs": 0, "vm output": 27757886, "vm restarts [base]": 41, "vm restarts [new]": 495 } 2025/08/16 20:58:08 runner 0 connected 2025/08/16 20:58:13 runner 9 connected 2025/08/16 20:58:15 runner 7 connected 2025/08/16 20:58:30 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:58:30 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:58:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:58:32 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:58:41 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:58:41 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:59:11 runner 2 connected 2025/08/16 20:59:13 runner 9 connected 2025/08/16 20:59:22 runner 7 connected 2025/08/16 20:59:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:59:33 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:59:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:59:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 20:59:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 20:59:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:00:14 runner 9 connected 2025/08/16 21:00:15 runner 2 connected 2025/08/16 21:00:17 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:00:26 runner 7 connected 2025/08/16 21:00:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:00:34 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:00:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:00:45 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:01:02 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:01:15 runner 2 connected 2025/08/16 21:01:26 runner 7 connected 2025/08/16 21:01:31 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:01:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:01:47 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:01:58 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/16 21:01:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:01:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:01:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:01:59 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:02:07 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:02:29 runner 2 connected 2025/08/16 21:02:35 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:02:40 runner 9 connected 2025/08/16 21:02:40 runner 3 connected 2025/08/16 21:02:40 runner 7 connected 2025/08/16 21:02:54 
patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:02:54 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:02:57 STAT { "buffer too small": 0, "candidate triage jobs": 41, "candidates": 52103, "comps overflows": 0, "corpus": 25243, "corpus [files]": 3976, "corpus [symbols]": 8571, "cover overflows": 14499, "coverage": 255385, "distributor delayed": 61244, "distributor undelayed": 61206, "distributor violated": 1657, "exec candidate": 25623, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 20, "exec seeds": 0, "exec smash": 0, "exec total [base]": 117182, "exec total [new]": 118168, "exec triage": 78046, "executor restarts": 1743, "fault jobs": 0, "fuzzer jobs": 41, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 260946, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 25623, "no exec duration": 10171703000000, "no exec requests": 24863, "pending": 484, "prog exec time": 187, "reproducing": 1, "rpc recv": 19275489792, "rpc sent": 1665418928, "signal": 252991, "smash jobs": 0, "triage jobs": 0, "vm output": 28555956, "vm restarts [base]": 43, "vm restarts [new]": 508 } 2025/08/16 21:03:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:03:06 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:03:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = true] 2025/08/16 21:03:09 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:03:12 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:03:12 repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=true crepro=false desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:03:12 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 35.25 minutes 2025/08/16 21:03:12 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755378192.crash.log 2025/08/16 21:03:12 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755378192.repro.log 2025/08/16 21:03:12 start reproducing 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:03:15 new: boot error: can't ssh into the instance 2025/08/16 21:03:36 runner 2 connected 2025/08/16 21:03:48 runner 9 connected 2025/08/16 21:03:50 runner 7 connected 2025/08/16 21:03:55 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:03:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:03:57 runner 3 connected 2025/08/16 21:04:05 new: boot error: can't ssh into the instance 2025/08/16 21:04:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:04:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:04:14 new: 
boot error: can't ssh into the instance 2025/08/16 21:04:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:04:46 runner 0 connected 2025/08/16 21:04:46 runner 2 connected 2025/08/16 21:04:47 runner 9 connected 2025/08/16 21:04:52 runner 7 connected 2025/08/16 21:04:55 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:04:55 runner 1 connected 2025/08/16 21:04:59 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:05:02 runner 3 connected 2025/08/16 21:05:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:10 new: boot error: can't ssh into the instance 2025/08/16 21:05:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:30 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:05:47 runner 0 connected 2025/08/16 21:05:57 runner 9 connected 2025/08/16 21:06:07 runner 7 connected 2025/08/16 21:06:11 runner 3 connected 2025/08/16 21:06:12 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:06:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:06:33 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:06:37 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:06:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:06:41 new: boot error: can't ssh into the instance 2025/08/16 21:06:54 runner 0 connected 2025/08/16 21:07:02 runner 9 connected 2025/08/16 21:07:14 runner 7 connected 2025/08/16 21:07:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:07:19 runner 3 connected 2025/08/16 21:07:22 runner 8 connected 2025/08/16 21:07:53 base crash: possible deadlock in ocfs2_reserve_suballoc_bits 2025/08/16 21:07:54 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/16 21:07:57 STAT { "buffer too small": 0, "candidate triage jobs": 102, "candidates": 51597, "comps overflows": 0, "corpus": 25676, "corpus [files]": 4049, "corpus [symbols]": 8713, "cover overflows": 14720, "coverage": 256717, "distributor delayed": 62560, "distributor undelayed": 62462, "distributor violated": 1657, "exec candidate": 26129, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 119375, "exec total [new]": 120616, "exec triage": 79484, "executor restarts": 1805, "fault jobs": 0, "fuzzer jobs": 102, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 262431, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 8, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 26129, "no exec 
duration": 10461798000000, "no exec requests": 25562, "pending": 485, "prog exec time": 236, "reproducing": 1, "rpc recv": 19906417080, "rpc sent": 1722727976, "signal": 254321, "smash jobs": 0, "triage jobs": 0, "vm output": 29453777, "vm restarts [base]": 43, "vm restarts [new]": 527 } 2025/08/16 21:07:57 base crash: possible deadlock in ocfs2_init_acl 2025/08/16 21:07:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:08:05 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/16 21:08:07 runner 0 connected 2025/08/16 21:08:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:08:20 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:08:20 patched-only: WARNING in folio_lruvec_lock_irqsave 2025/08/16 21:08:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:08:20 start reproducing 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:08:41 runner 1 connected 2025/08/16 21:08:42 runner 7 connected 2025/08/16 21:08:46 runner 2 connected 2025/08/16 21:08:46 runner 9 connected 2025/08/16 21:08:48 runner 3 connected 2025/08/16 21:08:50 runner 8 connected 2025/08/16 21:09:03 runner 0 connected 2025/08/16 21:09:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:09:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:09:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:09:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:09:26 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:09:47 runner 7 connected 2025/08/16 21:09:47 runner 9 connected 2025/08/16 21:09:50 runner 8 connected 2025/08/16 21:10:02 runner 3 connected 2025/08/16 21:10:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:10:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:10:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:10:30 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:10:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:10:55 runner 9 connected 2025/08/16 21:10:57 runner 8 connected 2025/08/16 21:11:08 runner 7 connected 2025/08/16 21:11:12 runner 3 connected 2025/08/16 21:11:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:11:39 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:11:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:11:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:12:00 runner 8 connected 2025/08/16 21:12:01 patched 
crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:12:33 runner 9 connected 2025/08/16 21:12:37 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:12:40 runner 3 connected 2025/08/16 21:12:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:12:43 runner 7 connected 2025/08/16 21:12:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:12:57 STAT { "buffer too small": 0, "candidate triage jobs": 104, "candidates": 51062, "comps overflows": 0, "corpus": 26203, "corpus [files]": 4108, "corpus [symbols]": 8857, "cover overflows": 14988, "coverage": 257927, "distributor delayed": 63963, "distributor undelayed": 63859, "distributor violated": 1718, "exec candidate": 26664, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 122201, "exec total [new]": 123202, "exec triage": 81096, "executor restarts": 1855, "fault jobs": 0, "fuzzer jobs": 104, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 263715, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 26664, "no exec duration": 10809018000000, "no exec requests": 26370, "pending": 485, "prog exec time": 294, "reproducing": 2, "rpc recv": 20572108620, "rpc sent": 1773839968, "signal": 255523, "smash jobs": 0, "triage jobs": 0, "vm output": 30252506, "vm restarts [base]": 46, "vm restarts [new]": 544 } 2025/08/16 21:13:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:13:18 new: boot error: can't ssh into the instance 2025/08/16 21:13:23 runner 8 connected 2025/08/16 21:13:33 runner 9 connected 2025/08/16 21:13:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:13:42 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:13:44 runner 7 connected 2025/08/16 21:13:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:14:16 runner 3 connected 2025/08/16 21:14:17 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:14:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:14:45 runner 8 connected 2025/08/16 21:14:50 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:14:51 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:14:59 patched crashed: WARNING in 
folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:15:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:15:15 new: boot error: can't ssh into the instance 2025/08/16 21:15:16 new: boot error: can't ssh into the instance 2025/08/16 21:15:19 runner 7 connected 2025/08/16 21:15:21 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:15:26 new: boot error: can't ssh into the instance 2025/08/16 21:15:37 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:15:41 runner 9 connected 2025/08/16 21:15:54 runner 8 connected 2025/08/16 21:15:56 runner 2 connected 2025/08/16 21:15:58 runner 6 connected 2025/08/16 21:16:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:16:00 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:16:00 repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=true crepro=false desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:16:00 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 12.57 minutes 2025/08/16 21:16:00 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755378960.crash.log 2025/08/16 21:16:00 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755378960.repro.log 2025/08/16 21:16:00 start reproducing 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:16:07 runner 1 connected 2025/08/16 21:16:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:16:16 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:16:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:16:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:16:25 new: boot error: can't ssh into the instance 2025/08/16 21:16:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:16:50 runner 9 connected 2025/08/16 21:16:57 runner 8 connected 2025/08/16 21:17:00 runner 2 connected 2025/08/16 21:17:03 runner 6 connected 2025/08/16 21:17:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:17:10 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:17:11 runner 1 connected 2025/08/16 21:17:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:17:23 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 
21:17:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:17:44 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:17:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:17:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:17:56 runner 9 connected 2025/08/16 21:17:57 STAT { "buffer too small": 0, "candidate triage jobs": 185, "candidates": 50415, "comps overflows": 0, "corpus": 26761, "corpus [files]": 4179, "corpus [symbols]": 9018, "cover overflows": 15266, "coverage": 259431, "distributor delayed": 65469, "distributor undelayed": 65285, "distributor violated": 1773, "exec candidate": 27311, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 125341, "exec total [new]": 126292, "exec triage": 82948, "executor restarts": 1902, "fault jobs": 0, "fuzzer jobs": 185, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 265440, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 27311, "no exec duration": 11346082000000, "no exec requests": 27713, "pending": 484, "prog exec time": 244, "reproducing": 2, "rpc recv": 21121406148, "rpc sent": 1831676248, "signal": 256999, "smash jobs": 0, "triage jobs": 0, "vm output": 31230045, "vm restarts [base]": 46, "vm restarts [new]": 561 } 2025/08/16 21:18:10 runner 8 connected 2025/08/16 21:18:15 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:18:16 runner 6 connected 2025/08/16 21:18:22 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:18:29 runner 2 connected 2025/08/16 21:18:30 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:18:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:18:34 base crash: WARNING in xfrm_state_fini 2025/08/16 21:18:36 runner 1 connected 2025/08/16 21:18:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:18:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:19:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:19:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:19:20 runner 9 connected 2025/08/16 21:19:22 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:19:26 
attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:19:30 runner 8 connected 2025/08/16 21:19:31 runner 6 connected 2025/08/16 21:19:37 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:19:43 runner 2 connected 2025/08/16 21:19:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:19:48 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:19:55 runner 1 connected 2025/08/16 21:19:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:20:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:20:08 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:20:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:20:41 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:20:49 runner 2 connected 2025/08/16 21:20:53 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:21:01 runner 1 connected 2025/08/16 21:21:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:21:17 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:21:17 patched-only: WARNING in folio_lruvec_lock_irqsave 2025/08/16 21:21:17 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:21:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:21:49 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:21:49 repro finished 'WARNING in folio_lruvec_lock_irqsave (full)', repro=true crepro=true desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:21:49 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 13.47 minutes 2025/08/16 21:21:49 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755379309.crash.log 2025/08/16 21:21:49 start reproducing 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:21:49 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755379309.repro.log 2025/08/16 21:21:54 runner 2 connected 2025/08/16 21:21:57 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:21:57 
repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=true crepro=false desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:21:57 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 5.94 minutes 2025/08/16 21:21:57 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755379317.crash.log 2025/08/16 21:21:57 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755379317.repro.log 2025/08/16 21:21:57 start reproducing 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:22:06 runner 1 connected 2025/08/16 21:22:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:22:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:22:53 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:22:57 STAT { "buffer too small": 0, "candidate triage jobs": 300, "candidates": 49971, "comps overflows": 0, "corpus": 27081, "corpus [files]": 4218, "corpus [symbols]": 9107, "cover overflows": 15449, "coverage": 260510, "distributor delayed": 66601, "distributor undelayed": 66302, "distributor violated": 1774, "exec candidate": 27755, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 127348, "exec total [new]": 128206, "exec triage": 84003, "executor restarts": 1952, "fault jobs": 0, "fuzzer jobs": 300, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 266705, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 13, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 27755, "no exec duration": 11439536000000, "no exec requests": 27883, "pending": 483, "prog exec time": 233, "reproducing": 2, "rpc recv": 21583320220, "rpc sent": 1877815152, "signal": 258083, "smash jobs": 0, "triage jobs": 0, "vm output": 32010879, "vm restarts [base]": 46, "vm restarts [new]": 574 } 2025/08/16 21:23:01 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: aborting due to context cancelation 2025/08/16 21:23:20 runner 2 connected 2025/08/16 21:23:22 runner 1 connected 2025/08/16 21:23:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:23:42 runner 0 connected 2025/08/16 21:23:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:23:46 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:23:57 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:24:23 runner 2 connected 2025/08/16 21:24:23 runner 1 connected 2025/08/16 21:24:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:24:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:24:56 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start 
scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:24:56 new: boot error: can't ssh into the instance 2025/08/16 21:25:23 runner 1 connected 2025/08/16 21:25:27 runner 2 connected 2025/08/16 21:25:32 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:25:37 runner 3 connected 2025/08/16 21:25:43 new: boot error: can't ssh into the instance 2025/08/16 21:25:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:25:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:25:53 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:26:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:26:24 runner 7 connected 2025/08/16 21:26:25 runner 1 connected 2025/08/16 21:26:27 runner 2 connected 2025/08/16 21:26:38 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:26:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:26:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:26:49 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:26:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:26:59 runner 3 connected 2025/08/16 21:27:23 runner 7 connected 2025/08/16 21:27:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:27:29 runner 2 connected 2025/08/16 21:27:33 runner 1 connected 2025/08/16 21:27:48 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:27:57 STAT { "buffer too small": 0, "candidate triage jobs": 116, "candidates": 49810, "comps overflows": 0, "corpus": 27421, "corpus [files]": 4268, "corpus [symbols]": 9210, "cover overflows": 15548, "coverage": 262565, "distributor delayed": 67268, "distributor undelayed": 67170, "distributor violated": 1774, "exec candidate": 27916, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 128329, "exec total [new]": 129415, "exec triage": 84879, "executor restarts": 1989, "fault jobs": 0, "fuzzer jobs": 116, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 267056, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 16, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 27916, "no exec duration": 
11486935000000, "no exec requests": 27980, "pending": 483, "prog exec time": 202, "reproducing": 2, "rpc recv": 22071113736, "rpc sent": 1915862200, "signal": 260099, "smash jobs": 0, "triage jobs": 0, "vm output": 32602701, "vm restarts [base]": 47, "vm restarts [new]": 588 } 2025/08/16 21:27:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:28:05 runner 3 connected 2025/08/16 21:28:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:28:29 runner 2 connected 2025/08/16 21:28:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:28:39 base: boot error: can't ssh into the instance 2025/08/16 21:28:41 runner 1 connected 2025/08/16 21:28:45 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:28:56 runner 7 connected 2025/08/16 21:29:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:29:13 runner 3 connected 2025/08/16 21:29:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:29:20 runner 2 connected 2025/08/16 21:29:25 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:29:28 new: boot error: can't ssh into the instance 2025/08/16 21:29:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:29:46 runner 2 connected 2025/08/16 21:29:49 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:29:53 new: boot error: can't ssh into the instance 2025/08/16 21:29:56 runner 1 connected 2025/08/16 21:30:02 new: boot error: can't ssh into the instance 2025/08/16 21:30:07 runner 7 connected 2025/08/16 21:30:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:30:12 new: boot error: can't ssh into the instance 2025/08/16 21:30:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:30:22 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:30:23 runner 3 connected 2025/08/16 21:30:28 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:30:35 runner 9 connected 2025/08/16 21:30:43 runner 8 connected 2025/08/16 21:30:53 runner 2 connected 2025/08/16 21:30:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:30:57 runner 1 connected 2025/08/16 21:30:59 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:31:00 runner 6 connected 2025/08/16 21:31:18 runner 7 connected 2025/08/16 21:31:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:31:40 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = 
false] 2025/08/16 21:31:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:31:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:31:45 runner 9 connected 2025/08/16 21:31:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:31:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:01 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:32:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:18 runner 3 connected 2025/08/16 21:32:24 runner 1 connected 2025/08/16 21:32:25 runner 6 connected 2025/08/16 21:32:28 runner 8 connected 2025/08/16 21:32:29 runner 7 connected 2025/08/16 21:32:33 runner 2 connected 2025/08/16 21:32:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:44 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:49 runner 9 connected 2025/08/16 21:32:49 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:32:57 STAT { "buffer too small": 0, "candidate triage jobs": 88, "candidates": 49078, "comps overflows": 0, "corpus": 28168, "corpus [files]": 4384, "corpus [symbols]": 9437, "cover overflows": 15876, "coverage": 264465, "distributor delayed": 69141, "distributor undelayed": 69064, "distributor violated": 1791, "exec candidate": 28648, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 131856, "exec total [new]": 132999, "exec triage": 87031, "executor restarts": 2061, "fault jobs": 0, "fuzzer jobs": 88, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 268938, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 17, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 28648, "no exec duration": 11782206000000, "no exec requests": 28751, "pending": 483, "prog exec time": 203, "reproducing": 2, "rpc recv": 22873819588, "rpc sent": 1995760760, "signal": 261979, "smash jobs": 0, "triage jobs": 0, "vm output": 33679893, "vm restarts [base]": 48, "vm restarts [new]": 611 } 2025/08/16 21:32:59 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:33:03 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:33:09 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:33:14 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:33:20 runner 3 connected 2025/08/16 21:33:25 runner 1 connected 2025/08/16 21:33:26 runner 6 connected 2025/08/16 21:33:30 runner 7 connected 2025/08/16 21:33:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 
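
The recurring "failed to symbolize report" entries above share one cause: report symbolization shells out to scripts/get_maintainer.pl with a path relative to the kernel source tree (the full command line is quoted in each entry), and "fork/exec scripts/get_maintainer.pl: no such file or directory" is the error Go's os/exec produces when that relative path is not reachable from the manager's working directory, presumably because no kernel checkout is present where the configuration points. The following minimal Go sketch only illustrates how that exact error string arises; it is not syzkaller code, and the script path and arguments are copied from the log entries above.

    package main

    import (
            "fmt"
            "os"
            "os/exec"
    )

    func main() {
            // Path and arguments as quoted in the log; the relative path only
            // resolves when the working directory is a kernel source tree.
            script := "scripts/get_maintainer.pl"
            args := []string{"--git-min-percent=15", "-f", "mm/memcontrol.c"}

            if _, err := os.Stat(script); err != nil {
                    fmt.Printf("note: %s is not reachable from the current directory: %v\n", script, err)
            }

            out, err := exec.Command(script, args...).Output()
            if err != nil {
                    // With the script missing, err renders as
                    // "fork/exec scripts/get_maintainer.pl: no such file or directory",
                    // matching the repeated symbolization failures in this log.
                    fmt.Printf("failed to start %s: %v\n", script, err)
                    return
            }
            fmt.Printf("%s", out)
    }

Run from a kernel tree, the same command would instead print the maintainers for mm/memcontrol.c; the fix implied by the log is simply to point the manager at a directory that actually contains scripts/get_maintainer.pl.
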
2025/08/16 21:33:40 runner 8 connected 2025/08/16 21:33:45 runner 2 connected 2025/08/16 21:33:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:33:52 base: boot error: can't ssh into the instance 2025/08/16 21:33:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:33:55 runner 9 connected 2025/08/16 21:33:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:34:03 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:34:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:34:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:34:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:34:27 runner 3 connected 2025/08/16 21:34:35 runner 1 connected 2025/08/16 21:34:35 runner 6 connected 2025/08/16 21:34:44 runner 7 connected 2025/08/16 21:34:51 runner 2 connected 2025/08/16 21:34:52 runner 8 connected 2025/08/16 21:34:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:34:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:35:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:35:08 runner 9 connected 2025/08/16 21:35:08 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:35:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:35:32 base crash: possible deadlock in ocfs2_init_acl 2025/08/16 21:35:34 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:35:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:35:45 runner 3 connected 2025/08/16 21:35:54 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:35:55 runner 6 connected 2025/08/16 21:35:57 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:36:13 runner 3 connected 2025/08/16 21:36:15 runner 2 connected 2025/08/16 21:36:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:36:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:36:25 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:36:26 runner 8 connected 2025/08/16 21:36:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:36:38 runner 9 connected 2025/08/16 21:36:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:36:55 new: boot error: can't ssh into the instance 2025/08/16 21:36:59 runner 6 connected 2025/08/16 21:37:04 runner 3 connected 2025/08/16 21:37:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:37:15 
runner 2 connected 2025/08/16 21:37:21 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:37:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:37:24 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:37:24 patched-only: WARNING in folio_lruvec_lock_irqsave 2025/08/16 21:37:24 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:37:34 runner 8 connected 2025/08/16 21:37:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:37:46 runner 9 connected 2025/08/16 21:37:57 STAT { "buffer too small": 0, "candidate triage jobs": 60, "candidates": 48508, "comps overflows": 0, "corpus": 28757, "corpus [files]": 4460, "corpus [symbols]": 9593, "cover overflows": 16060, "coverage": 265725, "distributor delayed": 70709, "distributor undelayed": 70652, "distributor violated": 1791, "exec candidate": 29218, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 134730, "exec total [new]": 135955, "exec triage": 88848, "executor restarts": 2135, "fault jobs": 0, "fuzzer jobs": 60, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 270089, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 17, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 29218, "no exec duration": 12065199000000, "no exec requests": 29352, "pending": 484, "prog exec time": 59, "reproducing": 2, "rpc recv": 23697357832, "rpc sent": 2065123800, "signal": 263240, "smash jobs": 0, "triage jobs": 0, "vm output": 34671288, "vm restarts [base]": 49, "vm restarts [new]": 635 } 2025/08/16 21:38:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:38:02 runner 6 connected 2025/08/16 21:38:03 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:38:03 repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=true crepro=false desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:38:03 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 16.09 minutes 2025/08/16 21:38:03 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755380283.crash.log 2025/08/16 21:38:03 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755380283.repro.log 2025/08/16 21:38:03 start reproducing 'WARNING in folio_lruvec_lock_irqsave' 2025/08/16 21:38:05 runner 3 connected 2025/08/16 21:38:10 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:38:11 runner 1 connected 2025/08/16 21:38:16 runner 2 connected 2025/08/16 21:38:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:38:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:38:42 runner 8 connected 2025/08/16 21:38:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:38:52 runner 9 connected 2025/08/16 21:39:07 patched crashed: 
WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:39:16 runner 3 connected 2025/08/16 21:39:18 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:39:18 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:39:19 runner 6 connected 2025/08/16 21:39:27 runner 2 connected 2025/08/16 21:39:39 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:39:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:39:46 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:39:48 runner 8 connected 2025/08/16 21:39:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:39:59 runner 9 connected 2025/08/16 21:40:20 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:40:20 runner 6 connected 2025/08/16 21:40:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:40:22 runner 3 connected 2025/08/16 21:40:28 new: boot error: can't ssh into the instance 2025/08/16 21:40:34 runner 2 connected 2025/08/16 21:40:46 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:40:56 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:41:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:41:08 runner 8 connected 2025/08/16 21:41:10 runner 9 connected 2025/08/16 21:41:28 runner 3 connected 2025/08/16 21:41:29 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:41:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:41:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:41:43 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:41:43 repro finished 'WARNING in folio_lruvec_lock_irqsave (full)', repro=true crepro=true desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false 2025/08/16 21:41:43 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 19.90 minutes 2025/08/16 21:41:43 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755380503.crash.log 2025/08/16 21:41:43 start reproducing 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:41:43 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755380503.repro.log 2025/08/16 21:41:44 runner 6 connected 2025/08/16 21:41:47 runner 2 connected 2025/08/16 21:41:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:42:09 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:42:13 runner 9 connected 2025/08/16 21:42:13 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:42:23 runner 8 connected 2025/08/16 21:42:34 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:42:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need 
repro = false] 2025/08/16 21:42:49 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:42:51 runner 6 connected 2025/08/16 21:42:55 runner 2 connected 2025/08/16 21:42:56 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:42:57 STAT { "buffer too small": 0, "candidate triage jobs": 23, "candidates": 48139, "comps overflows": 0, "corpus": 29155, "corpus [files]": 4515, "corpus [symbols]": 9705, "cover overflows": 16256, "coverage": 266662, "distributor delayed": 71671, "distributor undelayed": 71671, "distributor violated": 1791, "exec candidate": 29587, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 136594, "exec total [new]": 137918, "exec triage": 90010, "executor restarts": 2194, "fault jobs": 0, "fuzzer jobs": 23, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 0, "hints jobs": 0, "max signal": 270994, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 19, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 29586, "no exec duration": 12346920000000, "no exec requests": 30070, "pending": 482, "prog exec time": 207, "reproducing": 2, "rpc recv": 24410690968, "rpc sent": 2125205200, "signal": 264173, "smash jobs": 0, "triage jobs": 0, "vm output": 35468815, "vm restarts [base]": 50, "vm restarts [new]": 657 } 2025/08/16 21:43:15 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:43:20 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:43:20 patched-only: WARNING in folio_lruvec_lock_irqsave 2025/08/16 21:43:20 scheduled a reproduction of 'WARNING in folio_lruvec_lock_irqsave (full)' 2025/08/16 21:43:22 runner 9 connected 2025/08/16 21:43:23 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:43:26 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:43:31 runner 8 connected 2025/08/16 21:43:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:43:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:43:54 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:43:57 runner 6 connected 2025/08/16 21:44:01 runner 0 connected 2025/08/16 21:44:04 runner 2 connected 2025/08/16 21:44:08 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:44:16 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:44:24 patched crashed: 
WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:44:26 runner 9 connected 2025/08/16 21:44:32 runner 8 connected 2025/08/16 21:44:45 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:44:51 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:44:52 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:44:58 runner 6 connected 2025/08/16 21:44:59 new: boot error: can't ssh into the instance 2025/08/16 21:45:05 runner 2 connected 2025/08/16 21:45:10 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash 2025/08/16 21:45:11 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:45:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:45:26 runner 9 connected 2025/08/16 21:45:26 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:45:32 runner 8 connected 2025/08/16 21:45:41 runner 1 connected 2025/08/16 21:45:48 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:46:00 new: boot error: can't ssh into the instance 2025/08/16 21:46:00 runner 6 connected 2025/08/16 21:46:01 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:46:02 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:46:05 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:46:08 runner 2 connected 2025/08/16 21:46:25 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:46:27 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:46:29 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:46:30 new: boot error: can't ssh into the instance 2025/08/16 21:46:41 runner 7 connected 2025/08/16 21:46:42 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:46:46 runner 1 connected 2025/08/16 21:47:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 21:47:09 runner 2 connected 2025/08/16 21:47:10 runner 6 connected 2025/08/16 21:47:26 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/16 21:47:32 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false] 2025/08/16 
21:47:33 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:47:41 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:47:42 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:47:43 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:47:50 runner 1 connected
2025/08/16 21:47:57 STAT { "buffer too small": 0, "candidate triage jobs": 127, "candidates": 47857, "comps overflows": 0, "corpus": 29329, "corpus [files]": 4539, "corpus [symbols]": 9757, "cover overflows": 16438, "coverage": 267016, "distributor delayed": 72460, "distributor undelayed": 72347, "distributor violated": 1791, "exec candidate": 29869, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 138024, "exec total [new]": 139263, "exec triage": 90754, "executor restarts": 2244, "fault jobs": 0, "fuzzer jobs": 127, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 1, "hints jobs": 0, "max signal": 271635, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 23, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 29868, "no exec duration": 12613674000000, "no exec requests": 30670, "pending": 483, "prog exec time": 729, "reproducing": 2, "rpc recv": 25051089452, "rpc sent": 2177520824, "signal": 264516, "smash jobs": 0, "triage jobs": 0, "vm output": 36181623, "vm restarts [base]": 51, "vm restarts [new]": 675 }
2025/08/16 21:48:13 runner 7 connected
2025/08/16 21:48:19 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:48:24 runner 6 connected
2025/08/16 21:48:25 runner 2 connected
2025/08/16 21:48:27 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:48:29 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:48:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:48:41 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:48:52 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:49:04 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:49:07 runner 1 connected
2025/08/16 21:49:22 runner 7 connected
2025/08/16 21:49:38 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:49:45 runner 2 connected
2025/08/16 21:49:48 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:49:55 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:50:06 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:50:07 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:50:36 runner 1 connected
2025/08/16 21:50:47 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:50:48 runner 7 connected
2025/08/16 21:50:48 runner 2 connected
2025/08/16 21:50:55 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:51:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:51:11 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:51:22 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:51:41 runner 1 connected
2025/08/16 21:51:50 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:51:50 repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=true crepro=false desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false
2025/08/16 21:51:50 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 13.79 minutes
2025/08/16 21:51:50 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755381110.crash.log
2025/08/16 21:51:50 start reproducing 'WARNING in folio_lruvec_lock_irqsave'
2025/08/16 21:51:50 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755381110.repro.log
2025/08/16 21:51:52 runner 7 connected
2025/08/16 21:52:01 new: boot error: can't ssh into the instance
2025/08/16 21:52:02 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:52:02 repro finished 'WARNING in folio_lruvec_lock_irqsave (full)', repro=true crepro=true desc='WARNING in folio_lruvec_lock_irqsave' hub=false from_dashboard=false
2025/08/16 21:52:02 found repro for "WARNING in folio_lruvec_lock_irqsave" (orig title: "-SAME-", reliability: 1), took 10.32 minutes
2025/08/16 21:52:02 "WARNING in folio_lruvec_lock_irqsave": saved crash log into 1755381122.crash.log
2025/08/16 21:52:02 start reproducing 'WARNING in folio_lruvec_lock_irqsave (full)'
2025/08/16 21:52:02 "WARNING in folio_lruvec_lock_irqsave": saved repro log into 1755381122.repro.log
2025/08/16 21:52:04 runner 2 connected
2025/08/16 21:52:24 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:52:38 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:52:49 runner 3 connected
2025/08/16 21:52:57 STAT { "buffer too small": 0, "candidate triage jobs": 207, "candidates": 47217, "comps overflows": 0, "corpus": 29881, "corpus [files]": 4606, "corpus [symbols]": 9904, "cover overflows": 16808, "coverage": 268175, "distributor delayed": 73713, "distributor undelayed": 73512, "distributor violated": 1936, "exec candidate": 30509, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 24, "exec seeds": 0, "exec smash": 0, "exec total [base]": 140885, "exec total [new]": 142092, "exec triage": 92235, "executor restarts": 2285, "fault jobs": 0, "fuzzer jobs": 207, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 272943, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 24, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 30508, "no exec duration": 12866009000000, "no exec requests": 31313, "pending": 481, "prog exec time": 241, "reproducing": 2, "rpc recv": 25495658440, "rpc sent": 2223602888, "signal": 265682, "smash jobs": 0, "triage jobs": 0, "vm output": 36901999, "vm restarts [base]": 51, "vm restarts [new]": 688 }
2025/08/16 21:53:12 runner 1 connected
2025/08/16 21:53:33 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash
2025/08/16 21:53:33 patched-only: WARNING in folio_lruvec_lock_irqsave
2025/08/16 21:53:44 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash
2025/08/16 21:54:14 runner 0 connected
2025/08/16 21:54:17 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:54:28 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/08/16 21:54:36 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:55:04 runner 1 connected
2025/08/16 21:55:16 base: boot error: can't ssh into the instance
2025/08/16 21:55:18 runner 3 connected
2025/08/16 21:55:27 attempt #0 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash
2025/08/16 21:55:50 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:55:51 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:56:00 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:56:07 new: boot error: can't ssh into the instance
2025/08/16 21:56:08 new: boot error: can't ssh into the instance
2025/08/16 21:56:31 runner 1 connected
2025/08/16 21:56:41 runner 3 connected
2025/08/16 21:56:55 runner 8 connected
2025/08/16 21:56:57 runner 9 connected
2025/08/16 21:56:57 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:56:59 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: did not crash
2025/08/16 21:57:31 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:57:35 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:57:41 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:57:47 patched crashed: WARNING in folio_lruvec_lock_irqsave [need repro = false]
2025/08/16 21:57:53 bug reporting terminated
2025/08/16 21:57:53 status reporting terminated
2025/08/16 21:57:53 attempt #2 to run "WARNING in folio_lruvec_lock_irqsave" on base: skipping due to errors: context deadline exceeded /
2025/08/16 21:57:53 repro finished 'WARNING in folio_lruvec_lock_irqsave (full)', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/08/16 21:58:10 reproducing crash 'WARNING in folio_lruvec_lock_irqsave': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/memcontrol.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/08/16 21:58:10 repro finished 'WARNING in folio_lruvec_lock_irqsave', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/08/16 21:58:53 attempt #1 to run "WARNING in folio_lruvec_lock_irqsave" on base: aborting due to context cancelation
2025/08/16 22:02:44 syz-diff (new): kernel context loop terminated
2025/08/16 22:05:33 syz-diff (base): kernel context loop terminated
2025/08/16 22:05:33 diff fuzzing terminated
2025/08/16 22:05:33 fuzzing is finished
2025/08/16 22:05:33 status at the end:
Title On-Base On-Patched
WARNING in folio_lruvec_lock_irqsave 669 crashes[reproduced]
INFO: task hung in read_part_sector 1 crashes
INFO: task hung in v9fs_evict_inode 3 crashes
INFO: trying to register non-static key in ocfs2_dlm_shutdown 1 crashes
KASAN: slab-use-after-free Read in __xfrm_state_lookup 2 crashes
KASAN: slab-use-after-free Read in xfrm_alloc_spi 6 crashes
KASAN: slab-use-after-free Read in xfrm_state_find 1 crashes
WARNING in ext4_xattr_inode_lookup_create 4 crashes 4 crashes
WARNING in xfrm6_tunnel_net_exit 2 crashes
WARNING in xfrm_state_fini 3 crashes 3 crashes
general protection fault in pcl818_ai_cancel 1 crashes
kernel BUG in txUnlock 3 crashes 5 crashes
lost connection to test machine 2 crashes 5 crashes
possible deadlock in ocfs2_init_acl 4 crashes 2 crashes
possible deadlock in ocfs2_reserve_suballoc_bits 2 crashes 2 crashes
possible deadlock in ocfs2_try_remove_refcount_tree 7 crashes 1 crashes
unregister_netdevice: waiting for DEV to become free 1 crashes
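The recurring "failed to symbolize report" lines above all trace back to the same relative invocation of scripts/get_maintainer.pl, which only resolves when the process runs from inside a kernel source checkout. A minimal Go sketch (an illustration only, assuming no kernel tree in the current working directory) reproduces the identical fork/exec failure with the exact arguments seen in the log:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same command the symbolizer runs; the slash in the relative path means it is
	// resolved against the current working directory, not $PATH.
	cmd := exec.Command("scripts/get_maintainer.pl",
		"--git-min-percent=15", "-f", "mm/memcontrol.c")
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Without a kernel checkout here this prints:
		// fork/exec scripts/get_maintainer.pl: no such file or directory
		fmt.Println("failed to start scripts/get_maintainer.pl:", err)
		return
	}
	fmt.Print(string(out))
}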