2025/10/28 15:00:07 extracted 322873 text symbol hashes for base and 322895 for patched
2025/10/28 15:00:07 symbol "as102_read_ep2.__UNIQUE_ID_ddebug1314" has different values in base vs patch
2025/10/28 15:00:07 binaries are different, continuing fuzzing
2025/10/28 15:00:07 adding modified_functions to focus areas: ["__access_remote_vm" "__account_obj_stock" "__buffer_migrate_folio" "__folio_batch_add_and_move" "__folio_end_writeback" "__folio_mark_dirty" "__folio_migrate_mapping" "__folio_put" "__folio_split" "__folio_unqueue_deferred_split" "__get_obj_cgroup_from_memcg" "__handle_mm_fault" "__ia32_sys_bpf" "__ia32_sys_mlock" "__ia32_sys_mlock2" "__ia32_sys_munlockall" "__inode_attach_wb" "__lruvec_stat_mod_folio" "__mem_cgroup_charge" "__mem_cgroup_free" "__mem_cgroup_handle_over_high" "__mem_cgroup_try_charge_swap" "__mem_cgroup_uncharge" "__mem_cgroup_uncharge_folios" "__mem_cgroup_usage_register_event" "__mem_cgroup_usage_unregister_event" "__memcg_kmem_charge_page" "__memcg_kmem_uncharge_page" "__memcg_slab_free_hook" "__memcg_slab_post_alloc_hook" "__migrate_device_pages" "__mod_lruvec_kmem_state" "__node_reclaim" "__oom_kill_process" "__page_cache_release" "__pfx_compact_folio_lruvec_lock_irqsave" "__pfx_folio_lruvec_relock_irqsave" "__pfx_folio_matches_lruvec" "__pfx_folio_split_queue_lock_irqsave" "__pfx_get_mem_cgroup_css_from_folio" "__pfx_get_pfn_folio" "__pfx_lru_gen_reparent_memcg" "__pfx_lru_reparent_memcg" "__pfx_lruvec_unlock" "__pfx_lruvec_unlock_irq" "__pfx_lruvec_unlock_irqrestore" "__pfx_max_lru_gen_memcg" "__pfx_recheck_lru_gen_max_memcg" "__pfx_reparent_unlocks" "__pte_alloc" "__pte_alloc_kernel" "__reset_isolation_suitable" "__se_sys_mlockall" "__se_sys_move_pages" "__se_sys_munlock" "__swap_writepage" "__vm_insert_mixed" "__vmalloc_node_range_noprof" "__wb_update_bandwidth" "__x64_sys_bpf" "__x64_sys_mlock" "__x64_sys_mlock2" "__x64_sys_munlockall" "_vm_unmap_aliases" "alloc_charge_folio" "alloc_vmap_area" "balance_dirty_pages" "balance_dirty_pages_ratelimited_flags" "balance_wb_limits" "bdi_split_work_to_wbs" "bio_associate_blkg_from_page" "bpf_link_prime" "bpf_link_put_deferred" "bpf_map_copy_value" "bpf_map_free_deferred" "bpf_map_save_memcg" "bpf_map_update_value" "bpf_mem_alloc_destroy" "bpf_mem_alloc_init" "bpf_mem_alloc_percpu_unit_init" "bpf_obj_get_info_by_fd" "bpf_prog_alloc_id" "bpf_prog_attach" "bpf_prog_detach" "bpf_prog_load" "bpf_prog_put" "bpf_raw_tracepoint_open" "bpf_sys_bpf" "bpf_task_fd_query" "can_change_pmd_writable" "can_split_folio" "cgroup_id_from_mm" "cgroup_writeback_by_id" "change_prot_numa" "charge_memcg" "check_move_unevictable_folios" "cleanup_offline_cgwb" "compact_folio_lruvec_lock_irqsave" "compact_zone" "compaction_alloc" "compaction_proactiveness_sysctl_handler" "copy_page_range" "copy_pmd_range" "copy_remote_vm_str" "count_memcg_event_mm" "count_memcg_events" "count_memcg_folio_events" "count_swpout_vm_event" "current_obj_cgroup" "current_objcg_update" "damon_folio_mkold" "damon_folio_young" "damos_folio_filter_match" "dax_iomap_fault" "deactivate_file_folio" "deferred_split_folio" "deferred_split_scan" "dirty_bytes_handler" "dirty_ratio_handler" "do_huge_pmd_anonymous_page" "do_huge_pmd_wp_page" "do_shrink_slab" "do_try_to_free_pages" "do_wp_page" "drain_local_memcg_stock" "drain_local_obj_stock" "drain_obj_stock" "evict_folios" "filemap_migrate_folio" "flush_memcg_stats_dwork" "folio_activate" "folio_add_lru" "folio_alloc_buffers" "folio_batch_move_lru" "folio_deactivate" "folio_inc_gen" "folio_isolate_lru" "folio_lruvec" "folio_lruvec_lock" "folio_lruvec_lock_irq" "folio_lruvec_lock_irqsave" "folio_lruvec_relock_irq" "folio_lruvec_relock_irqsave" "folio_mark_accessed" "folio_mark_lazyfree" "folio_matches_lruvec" "folio_memcg" "folio_split_memcg_refs" "folio_split_queue_lock" "folio_split_queue_lock_irqsave" "folio_try_dup_anon_rmap_pmd" "folio_try_share_anon_rmap_pmd" "folios_put_refs" "follow_pfnmap_start" "free_percpu" "free_unmap_vmap_area" "get_mem_cgroup_css_from_folio" "get_mem_cgroup_from_current" "get_mem_cgroup_from_folio" "get_mem_cgroup_from_mm" "get_mem_cgroup_from_objcg" "get_obj_cgroup_from_current" "get_obj_cgroup_from_folio" "get_pfn_folio" "get_pmd_pfn" "get_pte_pfn" "handle_mm_fault" "high_work_func" "inc_max_seq" "inode_cgwb_move_to_attached" "inode_switch_wbs" "inode_switch_wbs_work_fn" "insert_page" "invalid_folio_referenced_vma" "isolate_freepages_block" "isolate_lru_folios" "isolate_migratepages_block" "kcompactd" "kern_sys_bpf" "kswapd" "link_create" "locked_inode_to_wb_and_lock_list" "lru_activate" "lru_add" "lru_deactivate" "lru_deactivate_file" "lru_gen_clear_refs" "lru_gen_look_around" "lru_gen_reparent_memcg" "lru_gen_seq_write" "lru_lazyfree" "lru_move_tail" "lru_reparent_memcg" "lruvec_is_sizable" "lruvec_unlock" "lruvec_unlock_irq" "lruvec_unlock_irqrestore" "madvise_free_huge_pmd" "map_anon_folio_pmd_pf" "map_delete_elem" "map_lookup_and_delete_elem" "map_lookup_elem" "map_update_elem" "max_lru_gen_memcg" "mem_cgroup_charge_hugetlb" "mem_cgroup_css_free" "mem_cgroup_css_offline" "mem_cgroup_css_online" "mem_cgroup_exit" "mem_cgroup_from_slab_obj" "mem_cgroup_get_oom_group" "mem_cgroup_id_get_online" "mem_cgroup_id_put_many" "mem_cgroup_iter" "mem_cgroup_iter_break" "mem_cgroup_migrate" "mem_cgroup_oom_synchronize" "mem_cgroup_replace_folio" "mem_cgroup_scan_tasks" "mem_cgroup_sk_alloc" "mem_cgroup_sk_free" "mem_cgroup_sk_inherit" "mem_cgroup_swap_full" "mem_cgroup_swapin_charge_folio" "mem_cgroup_track_foreign_dirty_slowpath" "mem_cgroup_wb_stats" "mem_cgroup_write" "memcg1_check_events" "memcg1_soft_limit_reclaim" "memcg1_swapout" "memcg_event_remove" "memcg_hotplug_cpu_dead" "memcg_numa_stat_show" "memcg_write_event_control" "memory_max_write" "memory_numa_stat_show" "memory_stat_format" "memory_stat_show" "migrate_folio" "migrate_folio_done" "migrate_huge_page_move_mapping" "migrate_pages" "migrate_pages_batch" "mlock_drain_remote" "mlock_folio" "mlock_folio_batch" "mlock_new_folio" "mm_get_huge_zero_folio" "mod_memcg_page_state" "mod_objcg_mlstate" "move_folios_to_lru" "move_pages_and_store_status" "munlock_folio" "numa_migrate_check" "obj_cgroup_charge_account" "obj_cgroup_charge_pages" "obj_cgroup_charge_zswap" "obj_cgroup_may_zswap" "obj_cgroup_put" "obj_cgroup_release" "obj_cgroup_uncharge_zswap" "page_cgroup_ino" "pcpu_alloc_area" "pcpu_alloc_noprof" "pcpu_balance_workfn" "pcpu_create_chunk" "pcpu_find_block_fit" "pcpu_free_area" "pcpu_memcg_post_alloc_hook" "pcpu_populate_chunk" "pcpu_reintegrate_chunk" "perf_trace_mm_lru_insertion" "perf_trace_track_foreign_dirty" "prepare_kswapd_sleep" "read_page_owner" "recheck_lru_gen_max_memcg" "reclaim_folio_list" "redirty_tail_locked" "refill_obj_stock" "refill_stock" "release_free_list" "remove_device_exclusive_entry" "remove_vm_area" "reparent_shrinker_deferred" "reparent_unlocks" "set_pmd_migration_entry" "shadow_lru_isolate" "shmem_swapin_folio" "shrink_active_list" "shrink_folio_list" "shrink_lruvec" "shrink_memcg_cb" "shrink_node" "shrink_slab" "shrink_worker" "sio_read_complete" "split_huge_pages_all" "split_huge_pages_in_file" "split_huge_pages_write" "split_huge_pmd_locked" "split_page_memcg" "swap_read_folio" "swap_writeout" "trace_event_raw_event_mm_lru_insertion" "trace_event_raw_event_track_foreign_dirty" "try_charge_memcg" "try_restore_exclusive_pte" "try_to_compact_pages" "try_to_free_pages" "try_to_inc_max_seq" "try_to_shrink_lruvec" "uncharge_batch" "uncharge_folio" "unmap_huge_pmd_locked" "unmap_page_range" "vm_insert_pages" "vm_map_ram" "vm_unmap_ram" "vmap_pfn_apply" "walk_pmd_range_locked" "walk_pud_range" "walk_update_folio" "wb_dirty_limits" "wb_put" "wb_workfn" "wbc_account_cgroup_owner" "wbc_detach_inode" "workingset_activation" "workingset_eviction" "workingset_refault" "writeback_inodes_wb" "writeback_sb_inodes" "writeback_single_inode" "zap_huge_pmd" "zswap_current_read" "zswap_entry_free" "zswap_folio_swapin" "zswap_invalidate" "zswap_load" "zswap_store"]
2025/10/28 15:00:07 adding directly modified files to focus areas: ["fs/buffer.c" "fs/fs-writeback.c" "include/linux/memcontrol.h" "include/linux/mm_inline.h" "include/linux/mmzone.h" "include/trace/events/writeback.h" "mm/compaction.c" "mm/huge_memory.c" "mm/memcontrol-v1.c" "mm/memcontrol.c" "mm/migrate.c" "mm/mlock.c" "mm/page_io.c" "mm/percpu.c" "mm/shrinker.c" "mm/swap.c" "mm/vmscan.c" "mm/workingset.c" "mm/zswap.c"]
2025/10/28 15:00:07 downloading corpus #1: "https://storage.googleapis.com/syzkaller/corpus/ci-upstream-kasan-gce-root-corpus.db"
2025/10/28 15:01:05 runner 2 connected
2025/10/28 15:01:05 runner 6 connected
2025/10/28 15:01:05 runner 3 connected
2025/10/28 15:01:06 runner 1 connected
2025/10/28 15:01:06 runner 8 connected
2025/10/28 15:01:06 runner 5 connected
2025/10/28 15:01:06 runner 4 connected
2025/10/28 15:01:06 runner 1 connected
2025/10/28 15:01:06 runner 7 connected
2025/10/28 15:01:06 runner 2 connected
2025/10/28 15:01:06 runner 0 connected
2025/10/28 15:01:07 runner 0 connected
2025/10/28 15:01:12 executor cover filter: 0 PCs
2025/10/28 15:01:12 initializing coverage information...
2025/10/28 15:01:16 discovered 7609 source files, 333847 symbols
2025/10/28 15:01:17 machine check:
disabled the following syscalls:
fsetxattr$security_selinux : selinux is not enabled
fsetxattr$security_smack_transmute : smack is not enabled
fsetxattr$smack_xattr_label : smack is not enabled
get_thread_area : syscall get_thread_area is not present
lookup_dcookie : syscall lookup_dcookie is not present
lsetxattr$security_selinux : selinux is not enabled
lsetxattr$security_smack_transmute : smack is not enabled
lsetxattr$smack_xattr_label : smack is not enabled
mount$esdfs : /proc/filesystems does not contain esdfs
mount$incfs : /proc/filesystems does not contain incremental-fs
openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory
openat$ashmem : failed to open /dev/ashmem: no such file or directory
openat$bifrost : failed to open /dev/bifrost: no such file or directory
openat$binder : failed to open /dev/binder: no such file or directory
openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory
openat$capi20 : failed to open /dev/capi20: no such file or directory
openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory
openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory
openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory
openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory
openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory
openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory
openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory
openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory
openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory
openat$hwbinder : failed to open /dev/hwbinder: no such file or directory
openat$i915 : failed to open /dev/i915: no such file or directory
openat$img_rogue : failed to open /dev/img-rogue: no such file or directory
openat$irnet : failed to open /dev/irnet: no such file or directory
openat$keychord : failed to open /dev/keychord: no such file or directory
openat$kvm : failed to open /dev/kvm: no such file or directory
openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory
openat$mali : failed to open /dev/mali0: no such file or directory
openat$md : failed to open /dev/md0: no such file or directory
openat$msm : failed to open /dev/msm: no such file or directory
openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory
openat$nmem0 : failed to open /dev/nmem0: no such file or directory
openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory
openat$pmem0 : failed to open /dev/pmem0: no such file or directory
openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory
openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory
openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory
openat$ptp1 : failed to open /dev/ptp1: no such file or directory
openat$rnullb : failed to open /dev/rnullb0: no such file or directory
openat$selinux_access : failed to open /selinux/access: no such file or directory
openat$selinux_attr : selinux is not enabled
openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory
openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory
openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory
openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory
openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory
openat$selinux_context : failed to open /selinux/context: no such file or directory
openat$selinux_create : failed to open /selinux/create: no such file or directory
openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory
openat$selinux_load : failed to open /selinux/load: no such file or directory
openat$selinux_member : failed to open /selinux/member: no such file or directory
openat$selinux_mls : failed to open /selinux/mls: no such file or directory
openat$selinux_policy : failed to open /selinux/policy: no such file or directory
openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory
openat$selinux_status : failed to open /selinux/status: no such file or directory
openat$selinux_user : failed to open /selinux/user: no such file or directory
openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory
openat$sev : failed to open /dev/sev: no such file or directory
openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory
openat$smack_task_current : smack is not enabled
openat$smack_thread_current : smack is not enabled
openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory
openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory
openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory
openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory
openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory
openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory
openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory
openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory
openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory
openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory
openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory
openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory
openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory
openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory
openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory
openat$tlk_device : failed to open /dev/tlk_device: no such file or directory
openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory
openat$tty : failed to open /dev/tty: no such device or address
openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory
openat$vfio : failed to open /dev/vfio/vfio: no such file or directory
openat$vndbinder : failed to open /dev/vndbinder: no such file or directory
openat$vtpm : failed to open /dev/vtpmx: no such file or directory
openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory
openat$zygote : failed to open /dev/socket/zygote: no such file or directory
pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device
read$smackfs_access : smack is not enabled
read$smackfs_cipsonum : smack is not enabled
read$smackfs_logging : smack is not enabled
read$smackfs_ptrace : smack is not enabled
set_thread_area : syscall set_thread_area is not present
setxattr$security_selinux : selinux is not enabled
setxattr$security_smack_transmute : smack is not enabled
setxattr$smack_xattr_label : smack is not enabled
socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol
socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported
socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported
socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device
syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory
syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported
syz_kvm_setup_cpu$ppc64 : unsupported arch
syz_mount_image$bcachefs : /proc/filesystems does not contain bcachefs
syz_mount_image$ntfs : /proc/filesystems does not contain ntfs
syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs
syz_mount_image$sysv : /proc/filesystems does not contain sysv
syz_mount_image$v7 : /proc/filesystems does not contain v7
syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory
syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory
syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory
syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory
syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory
syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device
uselib : syscall uselib is not present
write$selinux_access : selinux is not enabled
write$selinux_attr : selinux is not enabled
write$selinux_context : selinux is not enabled
write$selinux_create : selinux is not enabled
write$selinux_load : selinux is not enabled
write$selinux_user : selinux is not enabled
write$selinux_validatetrans : selinux is not enabled
write$smack_current : smack is not enabled
write$smackfs_access : smack is not enabled
write$smackfs_change_rule : smack is not enabled
write$smackfs_cipso : smack is not enabled
write$smackfs_cipsonum : smack is not enabled
write$smackfs_ipv6host : smack is not enabled
write$smackfs_label : smack is not enabled
write$smackfs_labels_list : smack is not enabled
write$smackfs_load : smack is not enabled
write$smackfs_logging : smack is not enabled
write$smackfs_netlabel : smack is not enabled
write$smackfs_ptrace : smack is not enabled
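(Editor's note: each entry above pairs a syscall description with the reason its runtime environment probe failed, typically a missing device node, an absent filesystem, or a disabled LSM. A minimal Go sketch of probes in that spirit follows; supportedDevice and supportedFilesystem are hypothetical helpers for illustration, not syzkaller's real machine-check code.)

// Sketch: gate syscall variants on environment probes whose error text
// becomes the reason string in the log.
package main

import (
	"fmt"
	"os"
	"strings"
)

// supportedFilesystem mirrors reasons like
// "/proc/filesystems does not contain bcachefs".
func supportedFilesystem(name string) error {
	data, err := os.ReadFile("/proc/filesystems")
	if err != nil {
		return err
	}
	if !strings.Contains(string(data), "\t"+name+"\n") {
		return fmt.Errorf("/proc/filesystems does not contain %v", name)
	}
	return nil
}

// supportedDevice mirrors reasons like
// "failed to open /dev/kvm: no such file or directory".
func supportedDevice(path string) error {
	f, err := os.Open(path)
	if err != nil {
		if pe, ok := err.(*os.PathError); ok {
			err = pe.Err // keep just "no such file or directory"
		}
		return fmt.Errorf("failed to open %v: %v", path, err)
	}
	f.Close()
	return nil
}

func main() {
	checks := map[string]func() error{
		"openat$kvm":               func() error { return supportedDevice("/dev/kvm") },
		"syz_mount_image$bcachefs": func() error { return supportedFilesystem("bcachefs") },
	}
	for call, check := range checks {
		if err := check(); err != nil {
			fmt.Printf("%v : %v\n", call, err) // disabled, with reason
		}
	}
}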
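(Editor's note: the next section is the transitive step: a syscall that consumes a resource becomes unusable once every syscall able to create that resource is itself disabled, and the rule is reapplied until a fixed point. The log prints each victim with the missing resource and its would-be creators, e.g. "ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]". Below is a self-contained Go sketch of that fixed-point computation; the types are illustrative, not syzkaller's prog package.)

// Sketch: iteratively disable syscalls whose input resources have no
// remaining enabled creator.
package main

import "fmt"

type Syscall struct {
	Name    string
	Uses    []string // resources this call consumes
	Creates []string // resources this call produces
}

func transitiveDisable(calls []Syscall, disabled map[string]bool) {
	for {
		// Resources that still have at least one enabled creator.
		creatable := map[string]bool{}
		for _, c := range calls {
			if disabled[c.Name] {
				continue
			}
			for _, r := range c.Creates {
				creatable[r] = true
			}
		}
		changed := false
		for _, c := range calls {
			if disabled[c.Name] {
				continue
			}
			for _, r := range c.Uses {
				if !creatable[r] {
					disabled[c.Name] = true
					fmt.Printf("%v : %v\n", c.Name, r) // missing resource
					changed = true
					break
				}
			}
		}
		if !changed {
			return // fixed point reached
		}
	}
}

func main() {
	calls := []Syscall{
		{Name: "openat$kvm", Creates: []string{"fd_kvm"}},
		{Name: "ioctl$KVM_CREATE_VM", Uses: []string{"fd_kvm"}, Creates: []string{"fd_kvmvm"}},
		{Name: "ioctl$KVM_CREATE_VCPU", Uses: []string{"fd_kvmvm"}, Creates: []string{"fd_kvmcpu"}},
		{Name: "ioctl$KVM_RUN", Uses: []string{"fd_kvmcpu"}},
	}
	// Suppose the machine check already disabled openat$kvm (/dev/kvm missing):
	// the whole KVM chain then falls over three iterations.
	disabled := map[string]bool{"openat$kvm": true}
	transitiveDisable(calls, disabled)
}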
transitively disabled the following syscalls (missing resource [creating syscalls]):
bind$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram]
close$ibv_device : fd_rdma [openat$uverbs0]
connect$hf : sock_hf [socket$hf]
connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram]
getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp]
getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp]
getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp]
getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp]
ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_NAME : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem]
ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20]
ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20]
ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20]
ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20]
ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20]
ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20]
ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20]
ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GET_PARAM : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue]
ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy]
ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali]
ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_ASSIGN_SET_MSIX_NR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm]
ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm]
ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm]
ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm]
ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm]
ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm]
ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm]
ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm]
ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm]
ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm]
ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_GUEST_DEBUG_x86 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_CANCEL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm]
ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_XEN_HVM_CONFIG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw]
ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device]
ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...]
ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb]
ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper]
ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey]
ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng]
ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure]
ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km]
ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage]
ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio]
ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio]
ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio]
ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm]
ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
mmap$DRM_I915 : fd_i915 [openat$i915]
mmap$DRM_MSM : fd_msm [openat$msm]
mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE]
mmap$bifrost : fd_bifrost [openat$bifrost openat$mali]
mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup]
pkey_free : pkey [pkey_alloc]
pkey_mprotect : pkey [pkey_alloc]
read$sndhw : fd_snd_hw [syz_open_dev$sndhw]
read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...]
recvmsg$hf : sock_hf [socket$hf]
sendmsg$hf : sock_hf [socket$hf]
setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp]
setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp]
setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp]
setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp]
syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86]
syz_kvm_assert_syzos_kvm_exit$x86 : kvm_run_ptr [mmap$KVM_VCPU]
syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU]
syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM]
syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM]
syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU]
syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU]
write$ALLOC_MW : fd_rdma [openat$uverbs0]
write$ALLOC_PD : fd_rdma [openat$uverbs0]
write$ATTACH_MCAST : fd_rdma [openat$uverbs0]
write$CLOSE_XRCD : fd_rdma [openat$uverbs0]
write$CREATE_AH : fd_rdma [openat$uverbs0]
write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0]
write$CREATE_CQ : fd_rdma [openat$uverbs0]
write$CREATE_CQ_EX : fd_rdma [openat$uverbs0]
write$CREATE_FLOW : fd_rdma [openat$uverbs0]
write$CREATE_QP : fd_rdma [openat$uverbs0]
write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0]
write$CREATE_SRQ : fd_rdma [openat$uverbs0]
write$CREATE_WQ : fd_rdma [openat$uverbs0]
write$DEALLOC_MW : fd_rdma [openat$uverbs0]
write$DEALLOC_PD : fd_rdma [openat$uverbs0]
write$DEREG_MR : fd_rdma [openat$uverbs0]
write$DESTROY_AH : fd_rdma [openat$uverbs0]
write$DESTROY_CQ : fd_rdma [openat$uverbs0]
write$DESTROY_FLOW : fd_rdma [openat$uverbs0]
write$DESTROY_QP : fd_rdma [openat$uverbs0]
write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0]
write$DESTROY_SRQ : fd_rdma [openat$uverbs0]
write$DESTROY_WQ : fd_rdma [openat$uverbs0]
write$DETACH_MCAST : fd_rdma [openat$uverbs0]
write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_DV_QP : fd_rdma
[openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. 
SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 3838/8056 2025/10/28 15:01:17 base: machine check complete 2025/10/28 15:01:18 coverage filter: __access_remote_vm: [__access_remote_vm] 2025/10/28 15:01:18 coverage filter: __account_obj_stock: [] 2025/10/28 15:01:18 coverage filter: __buffer_migrate_folio: [__buffer_migrate_folio] 2025/10/28 15:01:18 coverage filter: __folio_batch_add_and_move: [__folio_batch_add_and_move] 2025/10/28 15:01:18 coverage filter: __folio_end_writeback: [__folio_end_writeback] 2025/10/28 15:01:18 coverage filter: __folio_mark_dirty: [__folio_mark_dirty] 2025/10/28 15:01:18 coverage filter: __folio_migrate_mapping: [__folio_migrate_mapping] 2025/10/28 15:01:18 coverage filter: __folio_put: [__folio_put] 2025/10/28 15:01:18 coverage filter: __folio_split: [__folio_split] 2025/10/28 15:01:18 coverage filter: __folio_unqueue_deferred_split: [__folio_unqueue_deferred_split] 2025/10/28 15:01:18 coverage filter: __get_obj_cgroup_from_memcg: [] 2025/10/28 15:01:18 coverage filter: __handle_mm_fault: [__handle_mm_fault] 2025/10/28 15:01:18 coverage filter: __ia32_sys_bpf: [__ia32_sys_bpf] 2025/10/28 15:01:18 coverage filter: __ia32_sys_mlock: [__ia32_sys_mlock __ia32_sys_mlock2 __ia32_sys_mlockall] 2025/10/28 15:01:18 coverage filter: __ia32_sys_mlock2: [] 2025/10/28 15:01:18 coverage filter: __ia32_sys_munlockall: [__ia32_sys_munlockall] 2025/10/28 15:01:18 coverage filter: __inode_attach_wb: [__inode_attach_wb] 2025/10/28 15:01:18 coverage filter: __lruvec_stat_mod_folio: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_charge: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_free: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_handle_over_high: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_try_charge_swap: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_uncharge: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_uncharge_folios: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_usage_register_event: [] 2025/10/28 15:01:18 coverage filter: __mem_cgroup_usage_unregister_event: [] 2025/10/28 15:01:18 coverage filter: __memcg_kmem_charge_page: [] 2025/10/28 15:01:18 coverage filter: __memcg_kmem_uncharge_page: [] 2025/10/28 15:01:18 coverage filter: __memcg_slab_free_hook: [] 2025/10/28 15:01:18 coverage filter: __memcg_slab_post_alloc_hook: [] 2025/10/28 15:01:18 coverage filter: __migrate_device_pages: [__migrate_device_pages] 2025/10/28 15:01:18 coverage filter: __mod_lruvec_kmem_state: [] 2025/10/28 15:01:18 coverage filter: __node_reclaim: [__node_reclaim] 2025/10/28 15:01:18 coverage filter: __oom_kill_process: [__oom_kill_process] 2025/10/28 15:01:18 coverage filter: __page_cache_release: [__page_cache_release] 2025/10/28 15:01:18 coverage filter: __pfx_compact_folio_lruvec_lock_irqsave: [] 2025/10/28 15:01:18 coverage filter: __pfx_folio_lruvec_relock_irqsave: [] 2025/10/28 15:01:18 coverage filter: __pfx_folio_matches_lruvec: [] 2025/10/28 15:01:18 coverage filter: __pfx_folio_split_queue_lock_irqsave: [] 2025/10/28 15:01:18 coverage filter: __pfx_get_mem_cgroup_css_from_folio: [] 2025/10/28 15:01:18 coverage filter: __pfx_get_pfn_folio: [] 2025/10/28 15:01:18 coverage filter: __pfx_lru_gen_reparent_memcg: [] 2025/10/28 15:01:18 coverage filter: __pfx_lru_reparent_memcg: [] 2025/10/28 15:01:18 coverage filter: __pfx_lruvec_unlock: [] 2025/10/28 15:01:18 coverage filter: __pfx_lruvec_unlock_irq: [] 2025/10/28 
15:01:18 coverage filter: __pfx_lruvec_unlock_irqrestore: [] 2025/10/28 15:01:18 coverage filter: __pfx_max_lru_gen_memcg: [] 2025/10/28 15:01:18 coverage filter: __pfx_recheck_lru_gen_max_memcg: [] 2025/10/28 15:01:18 coverage filter: __pfx_reparent_unlocks: [] 2025/10/28 15:01:18 coverage filter: __pte_alloc: [__pte_alloc __pte_alloc_kernel] 2025/10/28 15:01:18 coverage filter: __pte_alloc_kernel: [] 2025/10/28 15:01:18 coverage filter: __reset_isolation_suitable: [__reset_isolation_suitable] 2025/10/28 15:01:18 coverage filter: __se_sys_mlockall: [__se_sys_mlockall] 2025/10/28 15:01:18 coverage filter: __se_sys_move_pages: [__se_sys_move_pages] 2025/10/28 15:01:18 coverage filter: __se_sys_munlock: [__se_sys_munlock] 2025/10/28 15:01:18 coverage filter: __swap_writepage: [__swap_writepage] 2025/10/28 15:01:18 coverage filter: __vm_insert_mixed: [__vm_insert_mixed] 2025/10/28 15:01:18 coverage filter: __vmalloc_node_range_noprof: [__vmalloc_node_range_noprof] 2025/10/28 15:01:18 coverage filter: __wb_update_bandwidth: [__wb_update_bandwidth] 2025/10/28 15:01:18 coverage filter: __x64_sys_bpf: [__x64_sys_bpf] 2025/10/28 15:01:18 coverage filter: __x64_sys_mlock: [__x64_sys_mlock __x64_sys_mlock2 __x64_sys_mlockall] 2025/10/28 15:01:18 coverage filter: __x64_sys_mlock2: [] 2025/10/28 15:01:18 coverage filter: __x64_sys_munlockall: [] 2025/10/28 15:01:18 coverage filter: _vm_unmap_aliases: [_vm_unmap_aliases] 2025/10/28 15:01:18 coverage filter: alloc_charge_folio: [alloc_charge_folio] 2025/10/28 15:01:18 coverage filter: alloc_vmap_area: [__bpf_trace_alloc_vmap_area __probestub_alloc_vmap_area __traceiter_alloc_vmap_area alloc_vmap_area perf_trace_alloc_vmap_area trace_event_raw_event_alloc_vmap_area trace_raw_output_alloc_vmap_area] 2025/10/28 15:01:18 coverage filter: balance_dirty_pages: [__bpf_trace_balance_dirty_pages __probestub_balance_dirty_pages __traceiter_balance_dirty_pages balance_dirty_pages balance_dirty_pages_ratelimited balance_dirty_pages_ratelimited_flags perf_trace_balance_dirty_pages trace_balance_dirty_pages trace_event_raw_event_balance_dirty_pages trace_raw_output_balance_dirty_pages] 2025/10/28 15:01:18 coverage filter: balance_dirty_pages_ratelimited_flags: [] 2025/10/28 15:01:18 coverage filter: balance_wb_limits: [balance_wb_limits] 2025/10/28 15:01:18 coverage filter: bdi_split_work_to_wbs: [bdi_split_work_to_wbs] 2025/10/28 15:01:18 coverage filter: bio_associate_blkg_from_page: [bio_associate_blkg_from_page] 2025/10/28 15:01:18 coverage filter: bpf_link_prime: [bpf_link_prime] 2025/10/28 15:01:18 coverage filter: bpf_link_put_deferred: [bpf_link_put_deferred] 2025/10/28 15:01:18 coverage filter: bpf_map_copy_value: [bpf_map_copy_value] 2025/10/28 15:01:18 coverage filter: bpf_map_free_deferred: [bpf_map_free_deferred] 2025/10/28 15:01:18 coverage filter: bpf_map_save_memcg: [bpf_map_save_memcg] 2025/10/28 15:01:18 coverage filter: bpf_map_update_value: [bpf_map_update_value] 2025/10/28 15:01:18 coverage filter: bpf_mem_alloc_destroy: [bpf_mem_alloc_destroy] 2025/10/28 15:01:18 coverage filter: bpf_mem_alloc_init: [bpf_mem_alloc_init] 2025/10/28 15:01:18 coverage filter: bpf_mem_alloc_percpu_unit_init: [bpf_mem_alloc_percpu_unit_init] 2025/10/28 15:01:18 coverage filter: bpf_obj_get_info_by_fd: [bpf_obj_get_info_by_fd] 2025/10/28 15:01:18 coverage filter: bpf_prog_alloc_id: [bpf_prog_alloc_id] 2025/10/28 15:01:18 coverage filter: bpf_prog_attach: [bpf_prog_attach bpf_prog_attach_check_attach_type flow_dissector_bpf_prog_attach_check netns_bpf_prog_attach] 
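The bracketed lists in the coverage-filter phase show how each focus-area function name from the modified_functions set is expanded into concrete kernel symbols; an empty `[]` means no standalone symbol was left to claim (the function was inlined, or its symbols were already taken by an earlier entry, as with __x64_sys_mlock swallowing __x64_sys_mlock2). A sketch of that expansion under two assumptions of mine, not verified against syzkaller's source: matching is by substring, and each symbol is claimed by the first name that matches it:

package main

import (
	"fmt"
	"strings"
)

// expand matches each focus name against the symbol table. A symbol is
// claimed by the first name that matches it, which reproduces the pattern
// where __x64_sys_mlock lists __x64_sys_mlock2 and the later entry for
// __x64_sys_mlock2 prints [].
func expand(focus, symbols []string) [][]string {
	claimed := map[string]bool{}
	out := make([][]string, len(focus))
	for i, f := range focus {
		for _, s := range symbols {
			if !claimed[s] && strings.Contains(s, f) {
				claimed[s] = true
				out[i] = append(out[i], s)
			}
		}
	}
	return out
}

func main() {
	symbols := []string{"__x64_sys_mlock", "__x64_sys_mlock2", "__x64_sys_mlockall"}
	focus := []string{"__x64_sys_mlock", "__x64_sys_mlock2"}
	matches := expand(focus, symbols)
	for i, f := range focus {
		fmt.Printf("coverage filter: %s: %v\n", f, matches[i])
	}
}

The same substring behavior explains the noisy expansions visible above: a short name like "kswapd" pulls in the whole __bpf_trace_/__probestub_/__traceiter_ tracepoint family, and "insert_page" matches unrelated i915 GTT helpers.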
2025/10/28 15:01:18 coverage filter: bpf_prog_detach: [bpf_prog_detach netns_bpf_prog_detach] 2025/10/28 15:01:18 coverage filter: bpf_prog_load: [bpf_lsm_bpf_prog_load bpf_prog_load bpf_prog_load_check_attach security_bpf_prog_load] 2025/10/28 15:01:18 coverage filter: bpf_prog_put: [__bpf_prog_put_noref __bpf_prog_put_rcu bpf_prog_put bpf_prog_put_deferred] 2025/10/28 15:01:18 coverage filter: bpf_raw_tracepoint_open: [bpf_raw_tracepoint_open] 2025/10/28 15:01:18 coverage filter: bpf_sys_bpf: [bpf_sys_bpf] 2025/10/28 15:01:18 coverage filter: bpf_task_fd_query: [bpf_task_fd_query bpf_task_fd_query_copy] 2025/10/28 15:01:18 coverage filter: can_change_pmd_writable: [can_change_pmd_writable] 2025/10/28 15:01:18 coverage filter: can_split_folio: [can_split_folio] 2025/10/28 15:01:18 coverage filter: cgroup_id_from_mm: [cgroup_id_from_mm] 2025/10/28 15:01:18 coverage filter: cgroup_writeback_by_id: [cgroup_writeback_by_id] 2025/10/28 15:01:18 coverage filter: change_prot_numa: [change_prot_numa] 2025/10/28 15:01:18 coverage filter: charge_memcg: [] 2025/10/28 15:01:18 coverage filter: check_move_unevictable_folios: [check_move_unevictable_folios] 2025/10/28 15:01:18 coverage filter: cleanup_offline_cgwb: [cleanup_offline_cgwb cleanup_offline_cgwbs_workfn] 2025/10/28 15:01:18 coverage filter: compact_folio_lruvec_lock_irqsave: [compact_folio_lruvec_lock_irqsave] 2025/10/28 15:01:18 coverage filter: compact_zone: [compact_zone] 2025/10/28 15:01:18 coverage filter: compaction_alloc: [compaction_alloc] 2025/10/28 15:01:18 coverage filter: compaction_proactiveness_sysctl_handler: [compaction_proactiveness_sysctl_handler] 2025/10/28 15:01:18 coverage filter: copy_page_range: [copy_page_range] 2025/10/28 15:01:18 coverage filter: copy_pmd_range: [copy_pmd_range] 2025/10/28 15:01:18 coverage filter: copy_remote_vm_str: [copy_remote_vm_str] 2025/10/28 15:01:18 coverage filter: count_memcg_event_mm: [count_memcg_event_mm count_memcg_event_mm] 2025/10/28 15:01:18 coverage filter: count_memcg_events: [] 2025/10/28 15:01:18 coverage filter: count_memcg_folio_events: [count_memcg_folio_events count_memcg_folio_events] 2025/10/28 15:01:18 coverage filter: count_swpout_vm_event: [count_swpout_vm_event] 2025/10/28 15:01:18 coverage filter: current_obj_cgroup: [] 2025/10/28 15:01:18 coverage filter: current_objcg_update: [] 2025/10/28 15:01:18 coverage filter: damon_folio_mkold: [damon_folio_mkold damon_folio_mkold_one] 2025/10/28 15:01:18 coverage filter: damon_folio_young: [damon_folio_young damon_folio_young_one] 2025/10/28 15:01:18 coverage filter: damos_folio_filter_match: [damos_folio_filter_match] 2025/10/28 15:01:18 coverage filter: dax_iomap_fault: [dax_iomap_fault] 2025/10/28 15:01:18 coverage filter: deactivate_file_folio: [deactivate_file_folio] 2025/10/28 15:01:18 coverage filter: deferred_split_folio: [deferred_split_folio] 2025/10/28 15:01:18 coverage filter: deferred_split_scan: [deferred_split_scan] 2025/10/28 15:01:18 coverage filter: dirty_bytes_handler: [dirty_bytes_handler] 2025/10/28 15:01:18 coverage filter: dirty_ratio_handler: [dirty_ratio_handler] 2025/10/28 15:01:18 coverage filter: do_huge_pmd_anonymous_page: [do_huge_pmd_anonymous_page] 2025/10/28 15:01:18 coverage filter: do_huge_pmd_wp_page: [do_huge_pmd_wp_page] 2025/10/28 15:01:18 coverage filter: do_shrink_slab: [do_shrink_slab] 2025/10/28 15:01:18 coverage filter: do_try_to_free_pages: [do_try_to_free_pages] 2025/10/28 15:01:18 coverage filter: do_wp_page: [do_wp_page] 2025/10/28 15:01:18 coverage filter: 
drain_local_memcg_stock: [] 2025/10/28 15:01:18 coverage filter: drain_local_obj_stock: [] 2025/10/28 15:01:18 coverage filter: drain_obj_stock: [] 2025/10/28 15:01:18 coverage filter: evict_folios: [evict_folios] 2025/10/28 15:01:18 coverage filter: filemap_migrate_folio: [filemap_migrate_folio] 2025/10/28 15:01:18 coverage filter: flush_memcg_stats_dwork: [] 2025/10/28 15:01:18 coverage filter: folio_activate: [folio_activate] 2025/10/28 15:01:18 coverage filter: folio_add_lru: [folio_add_lru folio_add_lru_vma] 2025/10/28 15:01:18 coverage filter: folio_alloc_buffers: [folio_alloc_buffers] 2025/10/28 15:01:18 coverage filter: folio_batch_move_lru: [folio_batch_move_lru] 2025/10/28 15:01:18 coverage filter: folio_deactivate: [folio_deactivate] 2025/10/28 15:01:18 coverage filter: folio_inc_gen: [folio_inc_gen] 2025/10/28 15:01:18 coverage filter: folio_isolate_lru: [folio_isolate_lru] 2025/10/28 15:01:18 coverage filter: folio_lruvec: [folio_lruvec folio_lruvec_relock_irq folio_lruvec_relock_irq folio_lruvec_relock_irqsave] 2025/10/28 15:01:18 coverage filter: folio_lruvec_lock: [] 2025/10/28 15:01:18 coverage filter: folio_lruvec_lock_irq: [] 2025/10/28 15:01:18 coverage filter: folio_lruvec_lock_irqsave: [] 2025/10/28 15:01:18 coverage filter: folio_lruvec_relock_irq: [] 2025/10/28 15:01:18 coverage filter: folio_lruvec_relock_irqsave: [] 2025/10/28 15:01:18 coverage filter: folio_mark_accessed: [folio_mark_accessed] 2025/10/28 15:01:18 coverage filter: folio_mark_lazyfree: [folio_mark_lazyfree] 2025/10/28 15:01:18 coverage filter: folio_matches_lruvec: [folio_matches_lruvec folio_matches_lruvec] 2025/10/28 15:01:18 coverage filter: folio_memcg: [folio_memcg folio_memcg folio_memcg folio_memcg folio_memcg folio_memcg folio_memcg folio_memcg] 2025/10/28 15:01:18 coverage filter: folio_split_memcg_refs: [] 2025/10/28 15:01:18 coverage filter: folio_split_queue_lock: [folio_split_queue_lock folio_split_queue_lock_irqsave] 2025/10/28 15:01:18 coverage filter: folio_split_queue_lock_irqsave: [] 2025/10/28 15:01:18 coverage filter: folio_try_dup_anon_rmap_pmd: [folio_try_dup_anon_rmap_pmd] 2025/10/28 15:01:18 coverage filter: folio_try_share_anon_rmap_pmd: [folio_try_share_anon_rmap_pmd] 2025/10/28 15:01:18 coverage filter: folios_put_refs: [folios_put_refs] 2025/10/28 15:01:18 coverage filter: follow_pfnmap_start: [follow_pfnmap_start] 2025/10/28 15:01:18 coverage filter: free_percpu: [__bpf_trace_percpu_free_percpu __free_percpu_irq __probestub_percpu_free_percpu __traceiter_percpu_free_percpu devm_free_percpu free_percpu free_percpu_irq free_percpu_nmi metadata_dst_free_percpu perf_trace_percpu_free_percpu trace_event_raw_event_percpu_free_percpu trace_percpu_free_percpu trace_raw_output_percpu_free_percpu] 2025/10/28 15:01:18 coverage filter: free_unmap_vmap_area: [free_unmap_vmap_area] 2025/10/28 15:01:18 coverage filter: get_mem_cgroup_css_from_folio: [] 2025/10/28 15:01:18 coverage filter: get_mem_cgroup_from_current: [] 2025/10/28 15:01:18 coverage filter: get_mem_cgroup_from_folio: [] 2025/10/28 15:01:18 coverage filter: get_mem_cgroup_from_mm: [] 2025/10/28 15:01:18 coverage filter: get_mem_cgroup_from_objcg: [get_mem_cgroup_from_objcg] 2025/10/28 15:01:18 coverage filter: get_obj_cgroup_from_current: [get_obj_cgroup_from_current] 2025/10/28 15:01:18 coverage filter: get_obj_cgroup_from_folio: [] 2025/10/28 15:01:18 coverage filter: get_pfn_folio: [get_pfn_folio] 2025/10/28 15:01:18 coverage filter: get_pmd_pfn: [get_pmd_pfn] 2025/10/28 15:01:18 coverage filter: get_pte_pfn: 
[get_pte_pfn] 2025/10/28 15:01:18 coverage filter: handle_mm_fault: [handle_mm_fault] 2025/10/28 15:01:18 coverage filter: high_work_func: [] 2025/10/28 15:01:18 coverage filter: inc_max_seq: [inc_max_seq try_to_inc_max_seq] 2025/10/28 15:01:18 coverage filter: inode_cgwb_move_to_attached: [inode_cgwb_move_to_attached] 2025/10/28 15:01:18 coverage filter: inode_switch_wbs: [__bpf_trace_inode_switch_wbs __bpf_trace_inode_switch_wbs_queue __probestub_inode_switch_wbs __probestub_inode_switch_wbs_queue __traceiter_inode_switch_wbs __traceiter_inode_switch_wbs_queue inode_switch_wbs inode_switch_wbs_work_fn perf_trace_inode_switch_wbs perf_trace_inode_switch_wbs_queue trace_event_raw_event_inode_switch_wbs trace_event_raw_event_inode_switch_wbs_queue trace_inode_switch_wbs_queue trace_raw_output_inode_switch_wbs trace_raw_output_inode_switch_wbs_queue] 2025/10/28 15:01:18 coverage filter: inode_switch_wbs_work_fn: [] 2025/10/28 15:01:18 coverage filter: insert_page: [bxt_vtd_ggtt_insert_page__BKL bxt_vtd_ggtt_insert_page__cb dpt_insert_page gen6_ggtt_insert_page gen8_ggtt_insert_page gen8_ggtt_insert_page_bind gmch_ggtt_insert_page insert_page insert_page_into_pte_locked intel_gmch_gtt_insert_page intel_gmch_gtt_insert_pages null_insert_page vm_insert_page vm_insert_pages vmf_insert_page_mkwrite] 2025/10/28 15:01:18 coverage filter: invalid_folio_referenced_vma: [invalid_folio_referenced_vma] 2025/10/28 15:01:18 coverage filter: isolate_freepages_block: [isolate_freepages_block] 2025/10/28 15:01:18 coverage filter: isolate_lru_folios: [isolate_lru_folios] 2025/10/28 15:01:18 coverage filter: isolate_migratepages_block: [isolate_migratepages_block] 2025/10/28 15:01:18 coverage filter: kcompactd: [__bpf_trace_kcompactd_wake_template __bpf_trace_mm_compaction_kcompactd_sleep __probestub_mm_compaction_kcompactd_sleep __probestub_mm_compaction_kcompactd_wake __probestub_mm_compaction_wakeup_kcompactd __traceiter_mm_compaction_kcompactd_sleep __traceiter_mm_compaction_kcompactd_wake __traceiter_mm_compaction_wakeup_kcompactd kcompactd kcompactd_run kcompactd_stop perf_trace_kcompactd_wake_template perf_trace_mm_compaction_kcompactd_sleep trace_event_raw_event_kcompactd_wake_template trace_event_raw_event_mm_compaction_kcompactd_sleep trace_mm_compaction_wakeup_kcompactd trace_raw_output_kcompactd_wake_template trace_raw_output_mm_compaction_kcompactd_sleep wakeup_kcompactd] 2025/10/28 15:01:18 coverage filter: kern_sys_bpf: [kern_sys_bpf] 2025/10/28 15:01:18 coverage filter: kswapd: [__bpf_trace_mm_vmscan_kswapd_sleep __bpf_trace_mm_vmscan_kswapd_wake __bpf_trace_mm_vmscan_wakeup_kswapd __probestub_mm_vmscan_kswapd_sleep __probestub_mm_vmscan_kswapd_wake __probestub_mm_vmscan_wakeup_kswapd __traceiter_mm_vmscan_kswapd_sleep __traceiter_mm_vmscan_kswapd_wake __traceiter_mm_vmscan_wakeup_kswapd kswapd kswapd_run kswapd_stop perf_trace_mm_vmscan_kswapd_sleep perf_trace_mm_vmscan_kswapd_wake perf_trace_mm_vmscan_wakeup_kswapd prepare_kswapd_sleep trace_event_raw_event_mm_vmscan_kswapd_sleep trace_event_raw_event_mm_vmscan_kswapd_wake trace_event_raw_event_mm_vmscan_wakeup_kswapd trace_raw_output_mm_vmscan_kswapd_sleep trace_raw_output_mm_vmscan_kswapd_wake trace_raw_output_mm_vmscan_wakeup_kswapd wakeup_kswapd] 2025/10/28 15:01:18 coverage filter: link_create: [__netlink_create __probestub_ocfs2_symlink_create __traceiter_ocfs2_symlink_create bpf_struct_ops_link_create dsa_port_phylink_create fw_devlink_create_devlink link_create mdev_link_create_link_store netlink_create netns_bpf_link_create 
phylink_create rtnl_newlink_create sock_map_link_create tipc_link_create tipc_link_create_dummy_tnl_msg trace_ocfs2_symlink_create] 2025/10/28 15:01:18 coverage filter: locked_inode_to_wb_and_lock_list: [locked_inode_to_wb_and_lock_list] 2025/10/28 15:01:18 coverage filter: lru_activate: [__bpf_trace_mm_lru_activate __probestub_mm_lru_activate __traceiter_mm_lru_activate lru_activate perf_trace_mm_lru_activate trace_event_raw_event_mm_lru_activate trace_raw_output_mm_lru_activate] 2025/10/28 15:01:18 coverage filter: lru_add: [__lru_add_drain_all __probestub_nfsd_file_lru_add __traceiter_nfsd_file_lru_add d_lru_add list_lru_add list_lru_add_obj lru_add lru_add_drain lru_add_drain_all lru_add_drain_cpu lru_add_drain_cpu_zone lru_add_drain_per_cpu lru_add_split_folio] 2025/10/28 15:01:18 coverage filter: lru_deactivate: [lru_deactivate lru_deactivate_file] 2025/10/28 15:01:18 coverage filter: lru_deactivate_file: [] 2025/10/28 15:01:18 coverage filter: lru_gen_clear_refs: [lru_gen_clear_refs] 2025/10/28 15:01:18 coverage filter: lru_gen_look_around: [lru_gen_look_around] 2025/10/28 15:01:18 coverage filter: lru_gen_reparent_memcg: [lru_gen_reparent_memcg] 2025/10/28 15:01:18 coverage filter: lru_gen_seq_write: [lru_gen_seq_write] 2025/10/28 15:01:18 coverage filter: lru_lazyfree: [lru_lazyfree] 2025/10/28 15:01:18 coverage filter: lru_move_tail: [drm_gem_lru_move_tail drm_gem_lru_move_tail_locked lru_move_tail] 2025/10/28 15:01:18 coverage filter: lru_reparent_memcg: [lru_reparent_memcg] 2025/10/28 15:01:18 coverage filter: lruvec_is_sizable: [lruvec_is_sizable] 2025/10/28 15:01:18 coverage filter: lruvec_unlock: [lruvec_unlock lruvec_unlock_irq lruvec_unlock_irqrestore] 2025/10/28 15:01:18 coverage filter: lruvec_unlock_irq: [] 2025/10/28 15:01:18 coverage filter: lruvec_unlock_irqrestore: [] 2025/10/28 15:01:18 coverage filter: madvise_free_huge_pmd: [madvise_free_huge_pmd] 2025/10/28 15:01:18 coverage filter: map_anon_folio_pmd_pf: [map_anon_folio_pmd_pf] 2025/10/28 15:01:18 coverage filter: map_delete_elem: [__fd_array_map_delete_elem arena_map_delete_elem array_map_delete_elem bloom_map_delete_elem bpf_map_delete_elem bpf_struct_ops_map_delete_elem cpu_map_delete_elem dev_map_delete_elem fd_array_map_delete_elem htab_lru_map_delete_elem htab_map_delete_elem map_delete_elem nsim_map_delete_elem queue_stack_map_delete_elem ringbuf_map_delete_elem sock_map_delete_elem stack_map_delete_elem xsk_map_delete_elem] 2025/10/28 15:01:18 coverage filter: map_lookup_and_delete_elem: [__htab_map_lookup_and_delete_elem htab_lru_map_lookup_and_delete_elem htab_lru_percpu_map_lookup_and_delete_elem htab_map_lookup_and_delete_elem htab_percpu_map_lookup_and_delete_elem map_lookup_and_delete_elem stack_map_lookup_and_delete_elem] 2025/10/28 15:01:18 coverage filter: map_lookup_elem: [__htab_map_lookup_elem arena_map_lookup_elem array_map_lookup_elem array_of_map_lookup_elem bloom_map_lookup_elem bpf_fd_array_map_lookup_elem bpf_fd_htab_map_lookup_elem bpf_map_lookup_elem bpf_struct_ops_map_lookup_elem cpu_map_lookup_elem dev_map_lookup_elem fd_array_map_lookup_elem htab_lru_map_lookup_elem htab_lru_map_lookup_elem_sys htab_lru_percpu_map_lookup_elem htab_map_lookup_elem htab_of_map_lookup_elem htab_percpu_map_lookup_elem map_lookup_elem nsim_map_lookup_elem percpu_array_map_lookup_elem queue_stack_map_lookup_elem ringbuf_map_lookup_elem stack_map_lookup_elem xsk_map_lookup_elem xsk_map_lookup_elem_sys_only] 2025/10/28 15:01:18 coverage filter: map_update_elem: [__htab_lru_percpu_map_update_elem 
arena_map_update_elem array_map_update_elem bloom_map_update_elem bpf_fd_array_map_update_elem bpf_fd_htab_map_update_elem bpf_map_update_elem bpf_struct_ops_map_update_elem cpu_map_update_elem dev_map_update_elem htab_lru_map_update_elem htab_lru_percpu_map_update_elem htab_map_update_elem htab_map_update_elem_in_place htab_percpu_map_update_elem map_update_elem nsim_map_update_elem queue_stack_map_update_elem ringbuf_map_update_elem sock_map_update_elem sock_map_update_elem_sys stack_map_update_elem xsk_map_update_elem] 2025/10/28 15:01:18 coverage filter: max_lru_gen_memcg: [max_lru_gen_memcg] 2025/10/28 15:01:18 coverage filter: mem_cgroup_charge_hugetlb: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_css_free: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_css_offline: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_css_online: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_exit: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_from_slab_obj: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_get_oom_group: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_id_get_online: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_id_put_many: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_iter: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_iter_break: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_migrate: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_oom_synchronize: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_replace_folio: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_scan_tasks: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_sk_alloc: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_sk_free: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_sk_inherit: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_swap_full: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_swapin_charge_folio: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_track_foreign_dirty_slowpath: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_wb_stats: [] 2025/10/28 15:01:18 coverage filter: mem_cgroup_write: [] 2025/10/28 15:01:18 coverage filter: memcg1_check_events: [] 2025/10/28 15:01:18 coverage filter: memcg1_soft_limit_reclaim: [] 2025/10/28 15:01:18 coverage filter: memcg1_swapout: [] 2025/10/28 15:01:18 coverage filter: memcg_event_remove: [] 2025/10/28 15:01:18 coverage filter: memcg_hotplug_cpu_dead: [] 2025/10/28 15:01:18 coverage filter: memcg_numa_stat_show: [] 2025/10/28 15:01:18 coverage filter: memcg_write_event_control: [] 2025/10/28 15:01:18 coverage filter: memory_max_write: [] 2025/10/28 15:01:18 coverage filter: memory_numa_stat_show: [] 2025/10/28 15:01:18 coverage filter: memory_stat_format: [] 2025/10/28 15:01:18 coverage filter: memory_stat_show: [] 2025/10/28 15:01:18 coverage filter: migrate_folio: [aio_migrate_folio btree_migrate_folio btrfs_migrate_folio buffer_migrate_folio buffer_migrate_folio_norefs damon_migrate_folio_list hugetlbfs_migrate_folio kvm_gmem_migrate_folio metapage_migrate_folio migrate_folio migrate_folio_add migrate_folio_done migrate_folio_undo_dst migrate_folio_undo_src nfs_migrate_folio secretmem_migrate_folio] 2025/10/28 15:01:18 coverage filter: migrate_folio_done: [] 2025/10/28 15:01:18 coverage filter: migrate_huge_page_move_mapping: [migrate_huge_page_move_mapping] 2025/10/28 15:01:18 coverage filter: migrate_pages: [__bpf_trace_mm_migrate_pages __bpf_trace_mm_migrate_pages_start __ia32_sys_migrate_pages __probestub_mm_migrate_pages __probestub_mm_migrate_pages_start __se_sys_migrate_pages 
__traceiter_mm_migrate_pages __traceiter_mm_migrate_pages_start __x64_sys_migrate_pages damon_migrate_pages do_migrate_pages migrate_pages migrate_pages_batch perf_trace_mm_migrate_pages perf_trace_mm_migrate_pages_start trace_event_raw_event_mm_migrate_pages trace_event_raw_event_mm_migrate_pages_start trace_raw_output_mm_migrate_pages trace_raw_output_mm_migrate_pages_start] 2025/10/28 15:01:18 coverage filter: migrate_pages_batch: [] 2025/10/28 15:01:18 coverage filter: mlock_drain_remote: [mlock_drain_remote] 2025/10/28 15:01:18 coverage filter: mlock_folio: [mlock_folio mlock_folio_batch] 2025/10/28 15:01:18 coverage filter: mlock_folio_batch: [] 2025/10/28 15:01:18 coverage filter: mlock_new_folio: [mlock_new_folio] 2025/10/28 15:01:18 coverage filter: mm_get_huge_zero_folio: [mm_get_huge_zero_folio] 2025/10/28 15:01:18 coverage filter: mod_memcg_page_state: [mod_memcg_page_state] 2025/10/28 15:01:18 coverage filter: mod_objcg_mlstate: [] 2025/10/28 15:01:18 coverage filter: move_folios_to_lru: [move_folios_to_lru] 2025/10/28 15:01:18 coverage filter: move_pages_and_store_status: [move_pages_and_store_status] 2025/10/28 15:01:18 coverage filter: munlock_folio: [munlock_folio] 2025/10/28 15:01:18 coverage filter: numa_migrate_check: [numa_migrate_check] 2025/10/28 15:01:18 coverage filter: obj_cgroup_charge_account: [] 2025/10/28 15:01:18 coverage filter: obj_cgroup_charge_pages: [] 2025/10/28 15:01:18 coverage filter: obj_cgroup_charge_zswap: [] 2025/10/28 15:01:18 coverage filter: obj_cgroup_may_zswap: [] 2025/10/28 15:01:18 coverage filter: obj_cgroup_put: [obj_cgroup_put] 2025/10/28 15:01:18 coverage filter: obj_cgroup_release: [] 2025/10/28 15:01:18 coverage filter: obj_cgroup_uncharge_zswap: [] 2025/10/28 15:01:18 coverage filter: page_cgroup_ino: [] 2025/10/28 15:01:18 coverage filter: pcpu_alloc_area: [pcpu_alloc_area] 2025/10/28 15:01:18 coverage filter: pcpu_alloc_noprof: [pcpu_alloc_noprof] 2025/10/28 15:01:18 coverage filter: pcpu_balance_workfn: [pcpu_balance_workfn] 2025/10/28 15:01:18 coverage filter: pcpu_create_chunk: [pcpu_create_chunk] 2025/10/28 15:01:18 coverage filter: pcpu_find_block_fit: [pcpu_find_block_fit] 2025/10/28 15:01:18 coverage filter: pcpu_free_area: [pcpu_free_area] 2025/10/28 15:01:18 coverage filter: pcpu_memcg_post_alloc_hook: [pcpu_memcg_post_alloc_hook] 2025/10/28 15:01:18 coverage filter: pcpu_populate_chunk: [pcpu_populate_chunk] 2025/10/28 15:01:18 coverage filter: pcpu_reintegrate_chunk: [pcpu_reintegrate_chunk] 2025/10/28 15:01:18 coverage filter: perf_trace_mm_lru_insertion: [perf_trace_mm_lru_insertion] 2025/10/28 15:01:18 coverage filter: perf_trace_track_foreign_dirty: [perf_trace_track_foreign_dirty] 2025/10/28 15:01:18 coverage filter: prepare_kswapd_sleep: [] 2025/10/28 15:01:18 coverage filter: read_page_owner: [read_page_owner] 2025/10/28 15:01:18 coverage filter: recheck_lru_gen_max_memcg: [recheck_lru_gen_max_memcg] 2025/10/28 15:01:18 coverage filter: reclaim_folio_list: [reclaim_folio_list] 2025/10/28 15:01:18 coverage filter: redirty_tail_locked: [redirty_tail_locked] 2025/10/28 15:01:18 coverage filter: refill_obj_stock: [] 2025/10/28 15:01:18 coverage filter: refill_stock: [] 2025/10/28 15:01:18 coverage filter: release_free_list: [release_free_list] 2025/10/28 15:01:18 coverage filter: remove_device_exclusive_entry: [remove_device_exclusive_entry] 2025/10/28 15:01:18 coverage filter: remove_vm_area: [remove_vm_area] 2025/10/28 15:01:18 coverage filter: reparent_shrinker_deferred: [reparent_shrinker_deferred] 2025/10/28 
15:01:18 coverage filter: reparent_unlocks: [] 2025/10/28 15:01:18 coverage filter: set_pmd_migration_entry: [set_pmd_migration_entry] 2025/10/28 15:01:18 coverage filter: shadow_lru_isolate: [shadow_lru_isolate] 2025/10/28 15:01:18 coverage filter: shmem_swapin_folio: [shmem_swapin_folio] 2025/10/28 15:01:18 coverage filter: shrink_active_list: [shrink_active_list] 2025/10/28 15:01:18 coverage filter: shrink_folio_list: [shrink_folio_list] 2025/10/28 15:01:18 coverage filter: shrink_lruvec: [shrink_lruvec try_to_shrink_lruvec] 2025/10/28 15:01:18 coverage filter: shrink_memcg_cb: [shrink_memcg_cb] 2025/10/28 15:01:18 coverage filter: shrink_node: [mem_cgroup_shrink_node shrink_node] 2025/10/28 15:01:18 coverage filter: shrink_slab: [__bpf_trace_mm_shrink_slab_end __bpf_trace_mm_shrink_slab_start __probestub_mm_shrink_slab_end __probestub_mm_shrink_slab_start __traceiter_mm_shrink_slab_end __traceiter_mm_shrink_slab_start perf_trace_mm_shrink_slab_end perf_trace_mm_shrink_slab_start shrink_slab trace_event_raw_event_mm_shrink_slab_end trace_event_raw_event_mm_shrink_slab_start trace_raw_output_mm_shrink_slab_end trace_raw_output_mm_shrink_slab_start] 2025/10/28 15:01:18 coverage filter: shrink_worker: [mb_cache_shrink_worker shrink_worker] 2025/10/28 15:01:18 coverage filter: sio_read_complete: [sio_read_complete] 2025/10/28 15:01:18 coverage filter: split_huge_pages_all: [split_huge_pages_all] 2025/10/28 15:01:18 coverage filter: split_huge_pages_in_file: [split_huge_pages_in_file] 2025/10/28 15:01:18 coverage filter: split_huge_pages_write: [split_huge_pages_write] 2025/10/28 15:01:18 coverage filter: split_huge_pmd_locked: [split_huge_pmd_locked] 2025/10/28 15:01:18 coverage filter: split_page_memcg: [] 2025/10/28 15:01:18 coverage filter: swap_read_folio: [swap_read_folio] 2025/10/28 15:01:18 coverage filter: swap_writeout: [swap_writeout] 2025/10/28 15:01:18 coverage filter: trace_event_raw_event_mm_lru_insertion: [trace_event_raw_event_mm_lru_insertion] 2025/10/28 15:01:18 coverage filter: trace_event_raw_event_track_foreign_dirty: [trace_event_raw_event_track_foreign_dirty] 2025/10/28 15:01:18 coverage filter: try_charge_memcg: [] 2025/10/28 15:01:18 coverage filter: try_restore_exclusive_pte: [try_restore_exclusive_pte] 2025/10/28 15:01:18 coverage filter: try_to_compact_pages: [__bpf_trace_mm_compaction_try_to_compact_pages __probestub_mm_compaction_try_to_compact_pages __traceiter_mm_compaction_try_to_compact_pages perf_trace_mm_compaction_try_to_compact_pages trace_event_raw_event_mm_compaction_try_to_compact_pages trace_raw_output_mm_compaction_try_to_compact_pages try_to_compact_pages] 2025/10/28 15:01:18 coverage filter: try_to_free_pages: [try_to_free_pages] 2025/10/28 15:01:18 coverage filter: try_to_inc_max_seq: [] 2025/10/28 15:01:18 coverage filter: try_to_shrink_lruvec: [] 2025/10/28 15:01:18 coverage filter: uncharge_batch: [] 2025/10/28 15:01:18 coverage filter: uncharge_folio: [__hugetlb_cgroup_uncharge_folio hugetlb_cgroup_uncharge_folio hugetlb_cgroup_uncharge_folio_rsvd] 2025/10/28 15:01:18 coverage filter: unmap_huge_pmd_locked: [unmap_huge_pmd_locked] 2025/10/28 15:01:18 coverage filter: unmap_page_range: [unmap_page_range] 2025/10/28 15:01:18 coverage filter: vm_insert_pages: [] 2025/10/28 15:01:18 coverage filter: vm_map_ram: [vm_map_ram] 2025/10/28 15:01:18 coverage filter: vm_unmap_ram: [vm_unmap_ram] 2025/10/28 15:01:18 coverage filter: vmap_pfn_apply: [vmap_pfn_apply] 2025/10/28 15:01:18 coverage filter: walk_pmd_range_locked: [walk_pmd_range_locked] 
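After the per-function and per-file lists, the log reports the filter's final size in PCs (area "symbols": 21793 PCs, area "files": 24481 PCs), which suggests each matched symbol or file is ultimately reduced to the set of coverage program counters it contains. A minimal sketch of that reduction, assuming hypothetical address-range metadata rather than syzkaller's real vmlinux handling:

package main

import "fmt"

type symbolRange struct {
	name       string
	start, end uint64 // [start, end) covers the symbol's code
}

// pcsInFilter keeps every known coverage PC that falls inside a matched
// symbol, i.e. the PCs the fuzzer will count as focus-area coverage.
func pcsInFilter(matched []symbolRange, allPCs []uint64) map[uint64]bool {
	keep := map[uint64]bool{}
	for _, pc := range allPCs {
		for _, s := range matched {
			if pc >= s.start && pc < s.end {
				keep[pc] = true
				break
			}
		}
	}
	return keep
}

func main() {
	matched := []symbolRange{{"shrink_node", 0x1000, 0x1200}}
	pcs := []uint64{0x0fff, 0x1004, 0x11f0, 0x1200}
	fmt.Printf("area \"symbols\": %d PCs in the cover filter\n",
		len(pcsInFilter(matched, pcs)))
}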
2025/10/28 15:01:18 coverage filter: walk_pud_range: [walk_pud_range] 2025/10/28 15:01:18 coverage filter: walk_update_folio: [walk_update_folio] 2025/10/28 15:01:18 coverage filter: wb_dirty_limits: [wb_dirty_limits] 2025/10/28 15:01:18 coverage filter: wb_put: [wb_put] 2025/10/28 15:01:18 coverage filter: wb_workfn: [wb_workfn] 2025/10/28 15:01:18 coverage filter: wbc_account_cgroup_owner: [wbc_account_cgroup_owner] 2025/10/28 15:01:18 coverage filter: wbc_detach_inode: [wbc_detach_inode] 2025/10/28 15:01:18 coverage filter: workingset_activation: [workingset_activation] 2025/10/28 15:01:18 coverage filter: workingset_eviction: [workingset_eviction] 2025/10/28 15:01:18 coverage filter: workingset_refault: [workingset_refault] 2025/10/28 15:01:18 coverage filter: writeback_inodes_wb: [__writeback_inodes_wb writeback_inodes_wb] 2025/10/28 15:01:18 coverage filter: writeback_sb_inodes: [__bpf_trace_writeback_sb_inodes_requeue __probestub_writeback_sb_inodes_requeue __traceiter_writeback_sb_inodes_requeue perf_trace_writeback_sb_inodes_requeue trace_event_raw_event_writeback_sb_inodes_requeue trace_raw_output_writeback_sb_inodes_requeue writeback_sb_inodes] 2025/10/28 15:01:18 coverage filter: writeback_single_inode: [__bpf_trace_writeback_single_inode_template __probestub_writeback_single_inode __probestub_writeback_single_inode_start __traceiter_writeback_single_inode __traceiter_writeback_single_inode_start __writeback_single_inode perf_trace_writeback_single_inode_template trace_event_raw_event_writeback_single_inode_template trace_raw_output_writeback_single_inode_template writeback_single_inode] 2025/10/28 15:01:18 coverage filter: zap_huge_pmd: [zap_huge_pmd] 2025/10/28 15:01:18 coverage filter: zswap_current_read: [] 2025/10/28 15:01:18 coverage filter: zswap_entry_free: [zswap_entry_free] 2025/10/28 15:01:18 coverage filter: zswap_folio_swapin: [zswap_folio_swapin] 2025/10/28 15:01:18 coverage filter: zswap_invalidate: [zswap_invalidate] 2025/10/28 15:01:18 coverage filter: zswap_load: [zswap_load] 2025/10/28 15:01:18 coverage filter: zswap_store: [zswap_store] 2025/10/28 15:01:18 coverage filter: fs/buffer.c: [fs/buffer.c fs/hpfs/buffer.c] 2025/10/28 15:01:18 coverage filter: fs/fs-writeback.c: [fs/fs-writeback.c] 2025/10/28 15:01:18 coverage filter: include/linux/memcontrol.h: [] 2025/10/28 15:01:18 coverage filter: include/linux/mm_inline.h: [] 2025/10/28 15:01:18 coverage filter: include/linux/mmzone.h: [] 2025/10/28 15:01:18 coverage filter: include/trace/events/writeback.h: [] 2025/10/28 15:01:18 coverage filter: mm/compaction.c: [mm/compaction.c] 2025/10/28 15:01:18 coverage filter: mm/huge_memory.c: [mm/huge_memory.c] 2025/10/28 15:01:18 coverage filter: mm/memcontrol-v1.c: [] 2025/10/28 15:01:18 coverage filter: mm/memcontrol.c: [] 2025/10/28 15:01:18 coverage filter: mm/migrate.c: [mm/migrate.c] 2025/10/28 15:01:18 coverage filter: mm/mlock.c: [mm/mlock.c] 2025/10/28 15:01:18 coverage filter: mm/page_io.c: [mm/page_io.c] 2025/10/28 15:01:18 coverage filter: mm/percpu.c: [mm/percpu.c] 2025/10/28 15:01:18 coverage filter: mm/shrinker.c: [mm/shrinker.c] 2025/10/28 15:01:18 coverage filter: mm/swap.c: [mm/swap.c mm/swap_cgroup.c] 2025/10/28 15:01:18 coverage filter: mm/vmscan.c: [mm/vmscan.c] 2025/10/28 15:01:18 coverage filter: mm/workingset.c: [mm/workingset.c] 2025/10/28 15:01:18 coverage filter: mm/zswap.c: [mm/zswap.c] 2025/10/28 15:01:18 area "symbols": 21793 PCs in the cover filter 2025/10/28 15:01:18 area "files": 24481 PCs in the cover filter 2025/10/28 15:01:18 area 
"": 0 PCs in the cover filter 2025/10/28 15:01:18 executor cover filter: 0 PCs 2025/10/28 15:01:22 machine check: disabled the following syscalls: fsetxattr$security_selinux : selinux is not enabled fsetxattr$security_smack_transmute : smack is not enabled fsetxattr$smack_xattr_label : smack is not enabled get_thread_area : syscall get_thread_area is not present lookup_dcookie : syscall lookup_dcookie is not present lsetxattr$security_selinux : selinux is not enabled lsetxattr$security_smack_transmute : smack is not enabled lsetxattr$smack_xattr_label : smack is not enabled mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr 
: selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open 
/dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled set_thread_area : syscall set_thread_area is not present setxattr$security_selinux : selinux is not enabled setxattr$security_smack_transmute : smack is not enabled setxattr$smack_xattr_label : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported syz_kvm_setup_cpu$ppc64 : unsupported arch syz_mount_image$bcachefs : /proc/filesystems does not contain bcachefs syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device uselib : syscall uselib is not present write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not 
enabled
transitively disabled the following syscalls (missing resource [creating syscalls]):
bind$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram]
close$ibv_device : fd_rdma [openat$uverbs0]
connect$hf : sock_hf [socket$hf]
connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram]
getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp]
getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp]
getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp]
getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp]
ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel]
ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_NAME : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem]
ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem]
ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20]
ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20]
ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20]
ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20]
ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20]
ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20]
ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20]
ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20]
ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_GET_PARAM : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm]
ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue]
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue]
ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDGETPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy]
ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy]
ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP]
ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP]
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali]
ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali]
ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_ASSIGN_SET_MSIX_NR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm]
ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm]
ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm]
ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm]
ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm]
ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm]
ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm]
ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm]
ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm]
ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm]
ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE]
ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_GUEST_DEBUG_x86 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_CANCEL : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm]
ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86]
ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$KVM_XEN_HVM_CONFIG : fd_kvmvm [ioctl$KVM_CREATE_VM]
ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup]
ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0]
ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw]
ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw]
ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device]
ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device]
ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...]
ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb]
ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper]
ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey]
ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng]
ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure]
ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km]
ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage]
ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio]
ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio]
ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio]
ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio]
ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm]
ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp]
mmap$DRM_I915 : fd_i915 [openat$i915]
mmap$DRM_MSM : fd_msm [openat$msm]
mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE]
mmap$bifrost : fd_bifrost [openat$bifrost openat$mali]
mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup]
pkey_free : pkey [pkey_alloc]
pkey_mprotect : pkey [pkey_alloc]
read$sndhw : fd_snd_hw [syz_open_dev$sndhw]
read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...]
recvmsg$hf : sock_hf [socket$hf]
sendmsg$hf : sock_hf [socket$hf]
setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp]
setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp]
setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp]
setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp]
syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86]
syz_kvm_assert_syzos_kvm_exit$x86 : kvm_run_ptr [mmap$KVM_VCPU]
syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU]
syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM]
syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM]
syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU]
syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU]
write$ALLOC_MW : fd_rdma [openat$uverbs0]
write$ALLOC_PD : fd_rdma [openat$uverbs0]
write$ATTACH_MCAST : fd_rdma [openat$uverbs0]
write$CLOSE_XRCD : fd_rdma [openat$uverbs0]
write$CREATE_AH : fd_rdma [openat$uverbs0]
write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0]
write$CREATE_CQ : fd_rdma [openat$uverbs0]
write$CREATE_CQ_EX : fd_rdma [openat$uverbs0]
write$CREATE_FLOW : fd_rdma [openat$uverbs0]
write$CREATE_QP : fd_rdma [openat$uverbs0]
write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0]
write$CREATE_SRQ : fd_rdma [openat$uverbs0]
write$CREATE_WQ : fd_rdma [openat$uverbs0]
write$DEALLOC_MW : fd_rdma [openat$uverbs0]
write$DEALLOC_PD : fd_rdma [openat$uverbs0]
write$DEREG_MR : fd_rdma [openat$uverbs0]
write$DESTROY_AH : fd_rdma [openat$uverbs0]
write$DESTROY_CQ : fd_rdma [openat$uverbs0]
write$DESTROY_FLOW : fd_rdma [openat$uverbs0]
write$DESTROY_QP : fd_rdma [openat$uverbs0]
write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0]
write$DESTROY_SRQ : fd_rdma [openat$uverbs0]
write$DESTROY_WQ : fd_rdma [openat$uverbs0]
write$DETACH_MCAST : fd_rdma [openat$uverbs0]
write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0]
write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0]
write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0]
write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0]
write$MODIFY_QP : fd_rdma [openat$uverbs0]
write$MODIFY_SRQ : fd_rdma [openat$uverbs0]
write$OPEN_XRCD : fd_rdma [openat$uverbs0]
write$POLL_CQ : fd_rdma [openat$uverbs0]
write$POST_RECV : fd_rdma [openat$uverbs0]
write$POST_SEND : fd_rdma [openat$uverbs0]
write$POST_SRQ_RECV : fd_rdma [openat$uverbs0]
write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0]
write$QUERY_PORT : fd_rdma [openat$uverbs0]
write$QUERY_QP : fd_rdma [openat$uverbs0]
write$QUERY_SRQ : fd_rdma [openat$uverbs0]
write$REG_MR : fd_rdma [openat$uverbs0]
write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0]
write$REREG_MR : fd_rdma [openat$uverbs0]
write$RESIZE_CQ : fd_rdma [openat$uverbs0]
write$capi20 : fd_capi20 [openat$capi20]
write$capi20_data : fd_capi20 [openat$capi20]
write$damon_attrs : fd_damon_attrs [openat$damon_attrs]
write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts]
write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions]
write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on]
write$damon_schemes : fd_damon_schemes [openat$damon_schemes]
write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids]
write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim]
write$sndhw : fd_snd_hw [syz_open_dev$sndhw]
write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw]
write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...]
write$trusty_avb : fd_trusty_avb [openat$trusty_avb]
write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper]
write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey]
write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng]
write$trusty_km : fd_trusty_km [openat$trusty_km]
write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure]
write$trusty_storage : fd_trusty_storage [openat$trusty_storage]
BinFmtMisc : enabled
Comparisons : enabled
Coverage : enabled
DelayKcovMmap : enabled
DevlinkPCI : PCI device 0000:00:10.0 is not available
ExtraCoverage : enabled
Fault : enabled
KCSAN : write(/sys/kernel/debug/kcsan, on) failed
KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE)
LRWPANEmulation : enabled
Leak : failed to write(kmemleak, "scan=off")
NetDevices : enabled
NetInjection : enabled
NicVF : PCI device 0000:00:11.0 is not available
SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67.
SandboxNamespace : enabled
SandboxNone : enabled
SandboxSetuid : enabled
Swap : enabled
USBEmulation : enabled
VhciInjection : enabled
WifiEmulation : enabled
syscalls : 3838/8056
2025/10/28 15:01:22 new: machine check complete
2025/10/28 15:01:22 new: adding 81237 seeds
2025/10/28 15:03:30 crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is already known
2025/10/28 15:03:30 base crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is to be ignored
2025/10/28 15:03:30 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:03:42 crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is already known
2025/10/28 15:03:42 base crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is to be ignored
2025/10/28 15:03:42 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:03:52 crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is already known
2025/10/28 15:03:52 base crash "KASAN: slab-out-of-bounds Read in change_page_attr_set_clr" is to be ignored
2025/10/28 15:03:52 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:03:56 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr
2025/10/28 15:04:27 runner 1 connected
2025/10/28 15:04:31 runner 5 connected
2025/10/28 15:04:46 runner 1 connected
2025/10/28 15:04:49 runner 8 connected
2025/10/28 15:05:09 STAT { "buffer too small": 0, "candidate triage jobs": 61, "candidates": 76398, "comps overflows": 0, "corpus": 4729, "corpus [files]": 3045, "corpus [symbols]": 3365, "cover overflows": 3418, "coverage": 161064, "distributor delayed": 4796, "distributor undelayed": 4796, "distributor violated": 2, "exec candidate": 4839, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 7477, "exec total [new]": 21854, "exec triage": 15204, "executor restarts [base]": 54, "executor restarts [new]": 110, "fault jobs": 0, "fuzzer jobs": 61, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 9, "hints jobs": 0, "max signal": 162768, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 4839, "no exec duration": 42746000000, "no exec requests": 353, "pending": 0, "prog exec time": 228, "reproducing": 0, "rpc recv": 1194807952, "rpc sent": 105017056, "signal": 158226, "smash jobs": 0, "triage jobs": 0, "vm output": 2154605, "vm restarts [base]": 4, "vm restarts [new]": 12 }
2025/10/28 15:05:43 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 15:06:10 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:06:10 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:06:14 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:06:21 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:06:21 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:06:26 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:06:40 runner 7 connected
2025/10/28 15:07:00 runner 1 connected
2025/10/28 15:07:03 runner 5 connected
2025/10/28 15:07:10 runner 2 connected
2025/10/28 15:07:15 runner 8 connected
2025/10/28 15:07:54 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = true]
2025/10/28 15:07:54 scheduled a reproduction of 'unregister_netdevice: waiting for DEV to become free'
2025/10/28 15:08:24 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:08:24 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:08:27 base crash: unregister_netdevice: waiting for DEV to become free
2025/10/28 15:08:28 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:08:28 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:08:35 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:08:35 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:08:38 crash "WARNING in xfrm_state_fini" is already known
2025/10/28 15:08:38 base crash "WARNING in xfrm_state_fini" is to be ignored
2025/10/28 15:08:38 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 15:08:39 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:08:39 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:08:49 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:08:49 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:08:51 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr
2025/10/28 15:08:51 runner 4 connected
2025/10/28 15:09:13 runner 0 connected
2025/10/28 15:09:16 runner 2 connected
2025/10/28 15:09:17 runner 2 connected
2025/10/28 15:09:25 runner 1 connected
2025/10/28 15:09:26 runner 5 connected
2025/10/28 15:09:28 runner 3 connected
2025/10/28 15:09:38 runner 6 connected
2025/10/28 15:09:39 runner 0 connected
2025/10/28 15:10:09 STAT { "buffer too small": 0, "candidate triage jobs": 61, "candidates": 71349, "comps overflows": 0, "corpus": 9728, "corpus [files]": 5256, "corpus [symbols]": 5740, "cover overflows": 7154, "coverage": 200018, "distributor delayed": 11412, "distributor undelayed": 11412, "distributor violated": 237, "exec candidate": 9888, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 4, "exec seeds": 0, "exec smash": 0, "exec total [base]": 17572, "exec total [new]": 45298, "exec triage": 31012, "executor restarts [base]": 71, "executor restarts [new]": 163, "fault jobs": 0, "fuzzer jobs": 61, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 9, "hints jobs": 0, "max signal": 201871, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 9888, "no exec duration": 45120000000, "no exec requests": 356, "pending": 8, "prog exec time": 251, "reproducing": 0, "rpc recv": 2338251108, "rpc sent": 225805048, "signal": 196435, "smash jobs": 0, "triage jobs": 0, "vm output": 4358436, "vm restarts [base]": 6, "vm restarts [new]": 24 }
2025/10/28 15:10:12 base crash: WARNING in xfrm_state_fini
2025/10/28 15:11:08 runner 2 connected
2025/10/28 15:13:21 crash "WARNING in xfrm6_tunnel_net_exit" is already known
2025/10/28 15:13:21 base crash "WARNING in xfrm6_tunnel_net_exit" is to be ignored
2025/10/28 15:13:21 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false]
2025/10/28 15:13:26 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 15:14:18 runner 3 connected
2025/10/28 15:14:23 runner 8 connected
2025/10/28 15:14:37 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:14:37 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:14:45 base crash: INFO: task hung in sync_bdevs
2025/10/28 15:14:47 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:14:47 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:14:58 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:14:58 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:15:09 STAT { "buffer too small": 0, "candidate triage jobs": 35, "candidates": 66246, "comps overflows": 0, "corpus": 14806, "corpus [files]": 7305, "corpus [symbols]": 7927, "cover overflows": 10644, "coverage": 225300, "distributor delayed": 17093, "distributor undelayed": 17086, "distributor violated": 238, "exec candidate": 14991, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 24520, "exec total [new]": 68832, "exec triage": 46698, "executor restarts [base]": 93, "executor restarts [new]": 220, "fault jobs": 0, "fuzzer jobs": 35, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 227175, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 14991, "no exec duration": 45282000000, "no exec requests": 358, "pending": 11, "prog exec time": 298, "reproducing": 0, "rpc recv": 3092990688, "rpc sent": 344936208, "signal": 221431, "smash jobs": 0, "triage jobs": 0, "vm output": 7334843, "vm restarts [base]": 7, "vm restarts [new]": 26 }
2025/10/28 15:15:34 runner 1 connected
2025/10/28 15:15:43 runner 2 connected
2025/10/28 15:15:44 runner 7 connected
2025/10/28 15:15:55 runner 6 connected
2025/10/28 15:16:13 base crash: INFO: task hung in corrupted
2025/10/28 15:16:17 patched crashed: INFO: task hung in addrconf_verify_work [need repro = true]
2025/10/28 15:16:17 scheduled a reproduction of 'INFO: task hung in addrconf_verify_work'
2025/10/28 15:16:39 crash "INFO: task hung in reg_check_chans_work" is already known
2025/10/28 15:16:39 base crash "INFO: task hung in reg_check_chans_work" is to be ignored
2025/10/28 15:16:39 patched crashed: INFO: task hung in reg_check_chans_work [need repro = false]
2025/10/28 15:17:10 runner 1 connected
2025/10/28 15:17:15 runner 0 connected
2025/10/28 15:17:21 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:17:21 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:17:31 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:17:31 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:17:33 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 15:17:36 runner 5 connected
2025/10/28 15:17:42 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:17:42 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:17:52 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:17:52 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:18:04 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:18:04 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:18:06 crash "INFO: task hung in __iterate_supers" is already known
2025/10/28 15:18:06 base crash "INFO: task hung in __iterate_supers" is to be ignored
2025/10/28 15:18:06 patched crashed: INFO: task hung in __iterate_supers [need repro = false]
2025/10/28 15:18:17 runner 1 connected
2025/10/28 15:18:22 runner 6 connected
2025/10/28 15:18:29 runner 8 connected
2025/10/28 15:18:32 runner 4 connected
2025/10/28 15:18:41 runner 3 connected
2025/10/28 15:18:43 base crash: INFO: task hung in __iterate_supers
2025/10/28 15:18:53 runner 0 connected
2025/10/28 15:18:57 runner 2 connected
2025/10/28 15:19:38 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:19:38 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:19:40 runner 0 connected
2025/10/28 15:19:46 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:19:46 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:19:48 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:19:48 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:19:57 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:19:57 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:20:09 STAT { "buffer too small": 0, "candidate triage jobs": 35, "candidates": 62529, "comps overflows": 0, "corpus": 18478, "corpus [files]": 8774, "corpus [symbols]": 9401, "cover overflows": 12881, "coverage": 239397, "distributor delayed": 22667, "distributor undelayed": 22664, "distributor violated": 442, "exec candidate": 18708, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 32293, "exec total [new]": 86218, "exec triage": 58015, "executor restarts [base]": 119, "executor restarts [new]": 291, "fault jobs": 0, "fuzzer jobs": 35, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 5, "hints jobs": 0, "max signal": 241258, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 18708, "no exec duration": 45453000000, "no exec requests": 361, "pending": 21, "prog exec time": 327, "reproducing": 0, "rpc recv": 4117642864, "rpc sent": 469514944, "signal": 235546, "smash jobs": 0, "triage jobs": 0, "vm output": 9912900, "vm restarts [base]": 10, "vm restarts [new]": 38 }
2025/10/28 15:20:28 runner 7 connected
2025/10/28 15:20:42 runner 6 connected
2025/10/28 15:20:45 runner 0 connected
2025/10/28 15:20:46 runner 3 connected
2025/10/28 15:22:17 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false]
2025/10/28 15:22:27 crash "general protection fault in pcl818_ai_cancel" is already known
2025/10/28 15:22:27 base crash "general protection fault in pcl818_ai_cancel" is to be ignored
2025/10/28 15:22:27 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false]
2025/10/28 15:22:56 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 15:23:13 runner 4 connected
2025/10/28 15:23:23 runner 5 connected
2025/10/28 15:23:45 runner 0 connected
2025/10/28 15:24:39 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:24:39 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:24:44 crash "general protection fault in pcl818_ai_cancel" is already known
2025/10/28 15:24:44 base crash "general protection fault in pcl818_ai_cancel" is to be ignored
2025/10/28 15:24:44 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false]
2025/10/28 15:24:50 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:24:50 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:25:00 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:25:00 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:25:09 STAT { "buffer too small": 0, "candidate triage jobs": 31, "candidates": 57376, "comps overflows": 0, "corpus": 23575, "corpus [files]": 10646, "corpus [symbols]": 11346, "cover overflows": 16681, "coverage": 254642, "distributor delayed": 28054, "distributor undelayed": 28051, "distributor violated": 447, "exec candidate": 23861, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 44685, "exec total [new]": 112220, "exec triage": 73903, "executor restarts [base]": 136, "executor restarts [new]": 337, "fault jobs": 0, "fuzzer jobs": 31, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 5, "hints jobs": 0, "max signal": 256585, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 23861, "no exec duration": 45453000000, "no exec requests": 361, "pending": 24, "prog exec time": 209, "reproducing": 0, "rpc recv": 5099133064, "rpc sent": 617891592, "signal": 250396, "smash jobs": 0, "triage jobs": 0, "vm output": 12521043, "vm restarts [base]": 10, "vm restarts [new]": 45 }
2025/10/28 15:25:23 base crash: INFO: task hung in read_part_sector
2025/10/28 15:25:33 runner 1 connected
2025/10/28 15:25:35 runner 4 connected
2025/10/28 15:25:39 runner 7 connected
2025/10/28 15:25:48 runner 0 connected
2025/10/28 15:26:00 crash "WARNING in xfrm6_tunnel_net_exit" is already known
2025/10/28 15:26:00 base crash "WARNING in xfrm6_tunnel_net_exit" is to be ignored
2025/10/28 15:26:00 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false]
2025/10/28 15:26:10 base crash: WARNING in xfrm6_tunnel_net_exit
2025/10/28 15:26:13 runner 2 connected
2025/10/28 15:26:49 base crash: general protection fault in pcl818_ai_cancel
2025/10/28 15:26:50 runner 5 connected
2025/10/28 15:27:06 runner 0 connected
2025/10/28 15:27:27 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 15:27:38 runner 1 connected
2025/10/28 15:27:47 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:27:47 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:27:58 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:27:58 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:27:59 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:27:59 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:28:10 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:28:10 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:28:11 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 15:28:11 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 15:28:17 runner 1 connected
2025/10/28 15:28:20 patched crashed:
WARNING in folio_memcg [need repro = true] 2025/10/28 15:28:20 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:28:31 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:28:31 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:28:36 runner 3 connected 2025/10/28 15:28:36 base crash: KASAN: slab-use-after-free Read in hdm_disconnect 2025/10/28 15:28:42 patched crashed: KASAN: slab-use-after-free Read in hdm_disconnect [need repro = false] 2025/10/28 15:28:47 runner 4 connected 2025/10/28 15:28:48 runner 6 connected 2025/10/28 15:28:59 runner 8 connected 2025/10/28 15:29:00 runner 7 connected 2025/10/28 15:29:11 runner 0 connected 2025/10/28 15:29:19 runner 2 connected 2025/10/28 15:29:26 runner 2 connected 2025/10/28 15:29:30 runner 5 connected 2025/10/28 15:29:42 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:29:42 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:29:47 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:29:47 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:29:47 base crash: WARNING in xfrm_state_fini 2025/10/28 15:29:52 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:29:52 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:29:59 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:29:59 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:30:03 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:30:03 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:30:09 STAT { "buffer too small": 0, "candidate triage jobs": 33, "candidates": 53312, "comps overflows": 0, "corpus": 27583, "corpus [files]": 12091, "corpus [symbols]": 12808, "cover overflows": 19224, "coverage": 265318, "distributor delayed": 33179, "distributor undelayed": 33167, "distributor violated": 452, "exec candidate": 27925, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 6, "exec seeds": 0, "exec smash": 0, "exec total [base]": 51875, "exec total [new]": 133083, "exec triage": 86281, "executor restarts [base]": 160, "executor restarts [new]": 416, "fault jobs": 0, "fuzzer jobs": 33, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 0, "max signal": 267354, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 27925, "no exec duration": 45521000000, "no exec requests": 363, "pending": 36, "prog exec time": 256, "reproducing": 0, "rpc recv": 6200563000, "rpc sent": 756074216, "signal": 260895, "smash jobs": 0, "triage jobs": 0, "vm output": 15233882, "vm restarts [base]": 14, "vm restarts [new]": 59 } 2025/10/28 15:30:31 runner 8 connected 2025/10/28 15:30:41 runner 2 connected 2025/10/28 15:30:43 runner 6 connected 2025/10/28 15:30:45 runner 0 connected 2025/10/28 15:30:47 runner 0 connected 2025/10/28 15:30:51 runner 1 connected 2025/10/28 15:32:15 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:32:15 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:32:19 crash "kernel BUG in txUnlock" is already known 2025/10/28 15:32:19 base crash "kernel BUG in txUnlock" is to be ignored 2025/10/28 15:32:19 patched crashed: kernel BUG in txUnlock [need repro = 
false] 2025/10/28 15:32:20 crash "kernel BUG in txUnlock" is already known 2025/10/28 15:32:20 base crash "kernel BUG in txUnlock" is to be ignored 2025/10/28 15:32:20 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/10/28 15:32:22 crash "kernel BUG in txUnlock" is already known 2025/10/28 15:32:22 base crash "kernel BUG in txUnlock" is to be ignored 2025/10/28 15:32:22 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/10/28 15:32:26 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:32:26 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:32:36 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:32:36 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:32:52 base crash: kernel BUG in txUnlock 2025/10/28 15:33:03 runner 7 connected 2025/10/28 15:33:04 base crash: kernel BUG in txUnlock 2025/10/28 15:33:09 runner 0 connected 2025/10/28 15:33:11 runner 1 connected 2025/10/28 15:33:15 runner 4 connected 2025/10/28 15:33:17 runner 2 connected 2025/10/28 15:33:26 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:33:26 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:33:27 runner 5 connected 2025/10/28 15:33:36 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:33:36 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:33:41 runner 0 connected 2025/10/28 15:33:55 runner 2 connected 2025/10/28 15:34:13 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:34:13 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:34:15 runner 6 connected 2025/10/28 15:34:24 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:34:24 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:34:26 runner 7 connected 2025/10/28 15:34:46 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:34:46 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:34:56 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:34:56 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:35:03 runner 3 connected 2025/10/28 15:35:05 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:35:05 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:35:09 STAT { "buffer too small": 0, "candidate triage jobs": 43, "candidates": 49228, "comps overflows": 0, "corpus": 31603, "corpus [files]": 13488, "corpus [symbols]": 14243, "cover overflows": 22336, "coverage": 274356, "distributor delayed": 38363, "distributor undelayed": 38337, "distributor violated": 659, "exec candidate": 32009, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 7, "exec seeds": 0, "exec smash": 0, "exec total [base]": 61283, "exec total [new]": 156095, "exec triage": 98853, "executor restarts [base]": 175, "executor restarts [new]": 475, "fault jobs": 0, "fuzzer jobs": 43, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 276491, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 32009, "no exec duration": 45521000000, "no exec requests": 363, "pending": 46, "prog exec time": 255, "reproducing": 0, "rpc recv": 7315961896, "rpc sent": 893115200, 
"signal": 269855, "smash jobs": 0, "triage jobs": 0, "vm output": 17696541, "vm restarts [base]": 17, "vm restarts [new]": 73 } 2025/10/28 15:35:16 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/10/28 15:35:20 runner 0 connected 2025/10/28 15:35:27 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/10/28 15:35:34 runner 1 connected 2025/10/28 15:35:45 runner 4 connected 2025/10/28 15:36:01 runner 8 connected 2025/10/28 15:36:05 runner 6 connected 2025/10/28 15:36:23 runner 7 connected 2025/10/28 15:36:50 base crash: KASAN: slab-use-after-free Read in __ethtool_get_link_ksettings 2025/10/28 15:36:53 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:36:53 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:37:04 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:37:04 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:37:06 base crash: general protection fault in pcl818_ai_cancel 2025/10/28 15:37:47 runner 2 connected 2025/10/28 15:37:50 runner 3 connected 2025/10/28 15:38:02 runner 0 connected 2025/10/28 15:38:03 runner 8 connected 2025/10/28 15:38:23 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:38:35 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 15:39:12 runner 2 connected 2025/10/28 15:39:25 runner 8 connected 2025/10/28 15:40:09 STAT { "buffer too small": 0, "candidate triage jobs": 47, "candidates": 45016, "comps overflows": 0, "corpus": 35734, "corpus [files]": 14943, "corpus [symbols]": 15727, "cover overflows": 25194, "coverage": 283239, "distributor delayed": 42861, "distributor undelayed": 42860, "distributor violated": 676, "exec candidate": 36221, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 11, "exec seeds": 0, "exec smash": 0, "exec total [base]": 70758, "exec total [new]": 179370, "exec triage": 111642, "executor restarts [base]": 194, "executor restarts [new]": 563, "fault jobs": 0, "fuzzer jobs": 47, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 285559, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 36221, "no exec duration": 45521000000, "no exec requests": 363, "pending": 48, "prog exec time": 286, "reproducing": 0, "rpc recv": 8312818852, "rpc sent": 1053784448, "signal": 278742, "smash jobs": 0, "triage jobs": 0, "vm output": 22186246, "vm restarts [base]": 19, "vm restarts [new]": 83 } 2025/10/28 15:40:14 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:40:14 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:40:24 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:40:24 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:41:03 runner 4 connected 2025/10/28 15:41:20 runner 8 connected 2025/10/28 15:43:02 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 15:43:17 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:43:24 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 15:43:27 patched crashed: KASAN: slab-out-of-bounds Read 
in change_page_attr_set_clr [need repro = false] 2025/10/28 15:43:48 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:43:52 runner 7 connected 2025/10/28 15:43:58 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:44:06 runner 1 connected 2025/10/28 15:44:08 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:44:13 runner 2 connected 2025/10/28 15:44:16 runner 8 connected 2025/10/28 15:44:19 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:44:37 runner 4 connected 2025/10/28 15:44:55 runner 6 connected 2025/10/28 15:44:57 runner 5 connected 2025/10/28 15:45:08 runner 3 connected 2025/10/28 15:45:09 STAT { "buffer too small": 0, "candidate triage jobs": 39, "candidates": 41135, "comps overflows": 0, "corpus": 39591, "corpus [files]": 16201, "corpus [symbols]": 17027, "cover overflows": 27854, "coverage": 291106, "distributor delayed": 47259, "distributor undelayed": 47258, "distributor violated": 678, "exec candidate": 40102, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 15, "exec seeds": 0, "exec smash": 0, "exec total [base]": 81478, "exec total [new]": 201255, "exec triage": 123454, "executor restarts [base]": 205, "executor restarts [new]": 616, "fault jobs": 0, "fuzzer jobs": 39, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 293473, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 40102, "no exec duration": 45625000000, "no exec requests": 365, "pending": 50, "prog exec time": 263, "reproducing": 0, "rpc recv": 9230733968, "rpc sent": 1187412192, "signal": 286597, "smash jobs": 0, "triage jobs": 0, "vm output": 25389724, "vm restarts [base]": 19, "vm restarts [new]": 93 } 2025/10/28 15:46:20 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 15:47:07 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:47:07 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:47:12 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/10/28 15:47:16 runner 0 connected 2025/10/28 15:47:33 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:47:33 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:47:43 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:47:43 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:47:50 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 15:47:54 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:47:54 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:47:56 runner 4 connected 2025/10/28 15:48:05 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:48:05 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:48:08 runner 2 connected 2025/10/28 15:48:15 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:48:15 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:48:21 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 
15:48:24 runner 8 connected 2025/10/28 15:48:33 runner 6 connected 2025/10/28 15:48:38 runner 0 connected 2025/10/28 15:48:44 runner 1 connected 2025/10/28 15:48:54 runner 7 connected 2025/10/28 15:49:03 runner 5 connected 2025/10/28 15:49:10 runner 0 connected 2025/10/28 15:49:11 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:49:11 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:49:22 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:49:22 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:49:27 base crash: lost connection to test machine 2025/10/28 15:49:44 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:49:44 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:00 runner 4 connected 2025/10/28 15:50:05 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:05 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:06 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:06 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:09 STAT { "buffer too small": 0, "candidate triage jobs": 23, "candidates": 38645, "comps overflows": 0, "corpus": 42044, "corpus [files]": 17065, "corpus [symbols]": 17884, "cover overflows": 30758, "coverage": 296234, "distributor delayed": 49780, "distributor undelayed": 49774, "distributor violated": 682, "exec candidate": 42592, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 19, "exec seeds": 0, "exec smash": 0, "exec total [base]": 90140, "exec total [new]": 222370, "exec triage": 131214, "executor restarts [base]": 219, "executor restarts [new]": 686, "fault jobs": 0, "fuzzer jobs": 23, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 0, "max signal": 298572, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 42592, "no exec duration": 45737000000, "no exec requests": 369, "pending": 61, "prog exec time": 501, "reproducing": 0, "rpc recv": 10027984000, "rpc sent": 1341258096, "signal": 291708, "smash jobs": 0, "triage jobs": 0, "vm output": 28267981, "vm restarts [base]": 21, "vm restarts [new]": 102 } 2025/10/28 15:50:11 runner 2 connected 2025/10/28 15:50:15 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:15 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:16 runner 1 connected 2025/10/28 15:50:26 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:26 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:34 runner 5 connected 2025/10/28 15:50:36 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:36 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:48 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:50:48 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:50:54 runner 8 connected 2025/10/28 15:50:56 runner 1 connected 2025/10/28 15:51:04 runner 7 connected 2025/10/28 15:51:14 runner 3 connected 2025/10/28 15:51:21 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:51:25 runner 6 connected 2025/10/28 15:51:38 runner 0 connected 2025/10/28 15:52:17 
runner 5 connected 2025/10/28 15:52:22 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:52:22 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:52:23 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/10/28 15:52:28 base crash: lost connection to test machine 2025/10/28 15:52:57 patched crashed: KASAN: slab-use-after-free Read in hdm_disconnect [need repro = false] 2025/10/28 15:53:08 patched crashed: KASAN: slab-use-after-free Read in hdm_disconnect [need repro = false] 2025/10/28 15:53:17 runner 0 connected 2025/10/28 15:53:18 runner 0 connected 2025/10/28 15:53:18 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:53:18 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:53:20 runner 4 connected 2025/10/28 15:53:23 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:53:23 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:53:35 patched crashed: KASAN: slab-use-after-free Read in hdm_disconnect [need repro = false] 2025/10/28 15:53:46 runner 2 connected 2025/10/28 15:53:52 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:53:52 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:53:55 base crash: WARNING in xfrm_state_fini 2025/10/28 15:53:56 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = true] 2025/10/28 15:53:56 scheduled a reproduction of 'INFO: trying to register non-static key in ocfs2_dlm_shutdown' 2025/10/28 15:53:58 runner 3 connected 2025/10/28 15:54:08 runner 8 connected 2025/10/28 15:54:12 runner 6 connected 2025/10/28 15:54:24 runner 7 connected 2025/10/28 15:54:41 runner 0 connected 2025/10/28 15:54:43 runner 2 connected 2025/10/28 15:54:44 crash "WARNING in dax_iomap_rw" is already known 2025/10/28 15:54:44 base crash "WARNING in dax_iomap_rw" is to be ignored 2025/10/28 15:54:44 patched crashed: WARNING in dax_iomap_rw [need repro = false] 2025/10/28 15:54:44 runner 4 connected 2025/10/28 15:54:54 crash "WARNING in dax_iomap_rw" is already known 2025/10/28 15:54:54 base crash "WARNING in dax_iomap_rw" is to be ignored 2025/10/28 15:54:54 patched crashed: WARNING in dax_iomap_rw [need repro = false] 2025/10/28 15:55:09 STAT { "buffer too small": 0, "candidate triage jobs": 19, "candidates": 37419, "comps overflows": 0, "corpus": 43232, "corpus [files]": 17554, "corpus [symbols]": 18336, "cover overflows": 33433, "coverage": 298836, "distributor delayed": 51446, "distributor undelayed": 51446, "distributor violated": 684, "exec candidate": 43818, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 20, "exec seeds": 0, "exec smash": 0, "exec total [base]": 95947, "exec total [new]": 238780, "exec triage": 135001, "executor restarts [base]": 237, "executor restarts [new]": 774, "fault jobs": 0, "fuzzer jobs": 19, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 301209, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 43818, "no exec duration": 45872000000, "no exec requests": 371, "pending": 70, "prog exec time": 287, "reproducing": 0, "rpc recv": 10954685304, "rpc sent": 1482277992, "signal": 294342, "smash jobs": 0, "triage jobs": 0, "vm output": 
30711733, "vm restarts [base]": 24, "vm restarts [new]": 120 } 2025/10/28 15:55:10 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 15:55:17 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:55:17 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:55:32 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:55:34 runner 3 connected 2025/10/28 15:55:43 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:55:44 runner 6 connected 2025/10/28 15:55:53 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 15:55:59 runner 0 connected 2025/10/28 15:56:06 runner 2 connected 2025/10/28 15:56:21 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 15:56:22 runner 0 connected 2025/10/28 15:56:22 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:56:22 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:56:32 runner 5 connected 2025/10/28 15:56:33 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:56:33 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:56:43 runner 2 connected 2025/10/28 15:56:45 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:56:45 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:56:53 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:56:53 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:56:55 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:56:55 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:57:06 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:57:06 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:57:09 runner 1 connected 2025/10/28 15:57:12 runner 7 connected 2025/10/28 15:57:16 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:57:16 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:57:23 runner 4 connected 2025/10/28 15:57:33 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:57:33 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:57:35 runner 2 connected 2025/10/28 15:57:43 runner 6 connected 2025/10/28 15:57:44 runner 0 connected 2025/10/28 15:57:55 runner 3 connected 2025/10/28 15:58:13 runner 5 connected 2025/10/28 15:58:22 runner 7 connected 2025/10/28 15:58:36 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 15:58:36 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 15:59:00 base crash: kernel BUG in txEnd 2025/10/28 15:59:21 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 15:59:33 runner 6 connected 2025/10/28 15:59:48 runner 2 connected 2025/10/28 15:59:51 crash "INFO: task hung in reg_check_chans_work" is already known 2025/10/28 15:59:51 base crash "INFO: task hung in reg_check_chans_work" is to be ignored 2025/10/28 15:59:51 patched crashed: INFO: task hung in reg_check_chans_work [need repro = false] 2025/10/28 16:00:09 STAT { "buffer too small": 0, "candidate triage jobs": 10, "candidates": 36523, "comps overflows": 0, "corpus": 44099, "corpus [files]": 17915, "corpus [symbols]": 18662, "cover overflows": 35490, "coverage": 301345, "distributor delayed": 52739, "distributor undelayed": 52738, "distributor 
violated": 684, "exec candidate": 44714, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 20, "exec seeds": 0, "exec smash": 0, "exec total [base]": 102632, "exec total [new]": 251072, "exec triage": 137790, "executor restarts [base]": 262, "executor restarts [new]": 865, "fault jobs": 0, "fuzzer jobs": 10, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 303753, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 44714, "no exec duration": 45886000000, "no exec requests": 372, "pending": 80, "prog exec time": 290, "reproducing": 0, "rpc recv": 11791466148, "rpc sent": 1598070688, "signal": 296850, "smash jobs": 0, "triage jobs": 0, "vm output": 33103698, "vm restarts [base]": 27, "vm restarts [new]": 135 } 2025/10/28 16:00:16 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:00:16 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:00:17 runner 2 connected 2025/10/28 16:00:48 runner 8 connected 2025/10/28 16:01:12 runner 6 connected 2025/10/28 16:01:42 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:01:49 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:01:49 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:02:13 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:02:23 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:02:34 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:02:39 runner 5 connected 2025/10/28 16:02:45 runner 7 connected 2025/10/28 16:02:57 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:03:02 runner 8 connected 2025/10/28 16:03:12 runner 2 connected 2025/10/28 16:03:23 runner 0 connected 2025/10/28 16:03:46 runner 6 connected 2025/10/28 16:03:49 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:03:49 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:04:03 base crash: unregister_netdevice: waiting for DEV to become free 2025/10/28 16:04:04 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:04:14 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:04:14 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:04:38 runner 4 connected 2025/10/28 16:04:42 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:04:42 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:04:52 runner 2 connected 2025/10/28 16:04:53 runner 8 connected 2025/10/28 16:04:53 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:04:53 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:05:03 runner 0 connected 2025/10/28 16:05:09 STAT { "buffer too small": 0, "candidate triage jobs": 5, "candidates": 35390, "comps overflows": 0, "corpus": 45174, "corpus [files]": 18335, "corpus [symbols]": 19043, "cover overflows": 38447, "coverage": 303545, "distributor delayed": 54038, "distributor undelayed": 54038, "distributor violated": 686, "exec 
candidate": 45847, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 21, "exec seeds": 0, "exec smash": 0, "exec total [base]": 110912, "exec total [new]": 269724, "exec triage": 141282, "executor restarts [base]": 278, "executor restarts [new]": 934, "fault jobs": 0, "fuzzer jobs": 5, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 5, "hints jobs": 0, "max signal": 306065, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 45831, "no exec duration": 45886000000, "no exec requests": 372, "pending": 86, "prog exec time": 285, "reproducing": 0, "rpc recv": 12496035948, "rpc sent": 1738849248, "signal": 299051, "smash jobs": 0, "triage jobs": 0, "vm output": 36329419, "vm restarts [base]": 28, "vm restarts [new]": 147 } 2025/10/28 16:05:17 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:05:17 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:05:32 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:05:32 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:05:32 runner 1 connected 2025/10/28 16:05:42 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:05:42 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:05:47 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:05:47 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:05:49 runner 3 connected 2025/10/28 16:06:10 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:06:10 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:06:13 runner 7 connected 2025/10/28 16:06:21 runner 2 connected 2025/10/28 16:06:30 runner 0 connected 2025/10/28 16:06:36 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:06:45 runner 4 connected 2025/10/28 16:06:59 runner 1 connected 2025/10/28 16:07:01 base crash: WARNING in xfrm_state_fini 2025/10/28 16:07:27 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:07:27 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:07:33 runner 6 connected 2025/10/28 16:07:50 runner 0 connected 2025/10/28 16:07:50 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:08:03 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/10/28 16:08:12 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:08:12 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:08:16 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:08:16 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:08:24 runner 3 connected 2025/10/28 16:08:25 patched crashed: KASAN: slab-use-after-free Read in hdm_disconnect [need repro = false] 2025/10/28 16:08:27 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:08:27 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:08:37 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:08:37 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:08:39 runner 7 connected 2025/10/28 16:08:52 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:08:52 scheduled a reproduction of 'WARNING in 
folio_memcg' 2025/10/28 16:08:53 runner 5 connected 2025/10/28 16:09:00 runner 8 connected 2025/10/28 16:09:03 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:09:03 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:09:05 runner 2 connected 2025/10/28 16:09:14 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:09:14 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:09:15 runner 1 connected 2025/10/28 16:09:16 runner 4 connected 2025/10/28 16:09:24 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:09:24 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:09:26 base crash: KASAN: slab-use-after-free Read in l2cap_unregister_user 2025/10/28 16:09:27 runner 6 connected 2025/10/28 16:09:33 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:09:35 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:09:35 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:09:40 runner 3 connected 2025/10/28 16:09:44 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:09:54 runner 0 connected 2025/10/28 16:10:04 runner 7 connected 2025/10/28 16:10:09 STAT { "buffer too small": 0, "candidate triage jobs": 17, "candidates": 34917, "comps overflows": 0, "corpus": 45558, "corpus [files]": 18503, "corpus [symbols]": 19192, "cover overflows": 40644, "coverage": 304278, "distributor delayed": 54670, "distributor undelayed": 54666, "distributor violated": 686, "exec candidate": 46320, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 25, "exec seeds": 0, "exec smash": 0, "exec total [base]": 120485, "exec total [new]": 282351, "exec triage": 142532, "executor restarts [base]": 291, "executor restarts [new]": 1021, "fault jobs": 0, "fuzzer jobs": 17, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 0, "max signal": 306832, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46241, "no exec duration": 47507000000, "no exec requests": 376, "pending": 101, "prog exec time": 278, "reproducing": 0, "rpc recv": 13376522616, "rpc sent": 1874369656, "signal": 299801, "smash jobs": 0, "triage jobs": 0, "vm output": 38345923, "vm restarts [base]": 29, "vm restarts [new]": 166 } 2025/10/28 16:10:12 runner 5 connected 2025/10/28 16:10:15 runner 1 connected 2025/10/28 16:10:24 runner 8 connected 2025/10/28 16:10:24 runner 2 connected 2025/10/28 16:10:32 runner 1 connected 2025/10/28 16:10:56 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:10:56 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:11:01 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:11:04 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:11:04 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:11:06 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:11:06 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:11:10 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:11:46 runner 8 connected 2025/10/28 16:11:54 patched 
crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:11:54 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:11:58 runner 2 connected 2025/10/28 16:11:59 runner 7 connected 2025/10/28 16:12:00 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:12:00 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:12:01 runner 3 connected 2025/10/28 16:12:02 runner 4 connected 2025/10/28 16:12:40 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:12:40 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:12:50 runner 1 connected 2025/10/28 16:12:56 runner 0 connected 2025/10/28 16:13:09 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:13:16 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:13:31 base crash: INFO: task hung in __iterate_supers 2025/10/28 16:13:37 runner 6 connected 2025/10/28 16:13:50 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:13:50 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:14:05 runner 4 connected 2025/10/28 16:14:06 runner 5 connected 2025/10/28 16:14:21 runner 0 connected 2025/10/28 16:14:39 runner 7 connected 2025/10/28 16:14:41 base crash: possible deadlock in ntfs_fiemap 2025/10/28 16:14:42 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:14:57 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:14:57 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:14:59 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:15:06 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:15:06 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:15:09 STAT { "buffer too small": 0, "candidate triage jobs": 8, "candidates": 34522, "comps overflows": 0, "corpus": 45864, "corpus [files]": 18613, "corpus [symbols]": 19293, "cover overflows": 43922, "coverage": 304923, "distributor delayed": 55119, "distributor undelayed": 55117, "distributor violated": 688, "exec candidate": 46715, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 25, "exec seeds": 0, "exec smash": 0, "exec total [base]": 130121, "exec total [new]": 299671, "exec triage": 143600, "executor restarts [base]": 306, "executor restarts [new]": 1095, "fault jobs": 0, "fuzzer jobs": 8, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 0, "max signal": 307523, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46568, "no exec duration": 50184000000, "no exec requests": 381, "pending": 110, "prog exec time": 244, "reproducing": 0, "rpc recv": 14174008592, "rpc sent": 2003548680, "signal": 300457, "smash jobs": 0, "triage jobs": 0, "vm output": 40604232, "vm restarts [base]": 31, "vm restarts [new]": 181 } 2025/10/28 16:15:10 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:15:18 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:15:18 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:15:31 runner 2 connected 2025/10/28 16:15:31 runner 5 
connected 2025/10/28 16:15:46 runner 8 connected 2025/10/28 16:15:50 runner 0 connected 2025/10/28 16:15:56 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 16:15:59 runner 7 connected 2025/10/28 16:16:00 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:16:02 runner 1 connected 2025/10/28 16:16:05 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 16:16:07 runner 6 connected 2025/10/28 16:16:11 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:16:16 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:16:44 runner 2 connected 2025/10/28 16:16:54 runner 0 connected 2025/10/28 16:16:55 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = true] 2025/10/28 16:16:55 scheduled a reproduction of 'possible deadlock in ocfs2_reserve_suballoc_bits' 2025/10/28 16:16:56 runner 4 connected 2025/10/28 16:16:59 runner 0 connected 2025/10/28 16:17:05 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:17:05 runner 5 connected 2025/10/28 16:17:07 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 16:17:07 base crash: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 2025/10/28 16:17:15 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:17:26 patched crashed: KASAN: slab-out-of-bounds Read in change_page_attr_set_clr [need repro = false] 2025/10/28 16:17:44 runner 8 connected 2025/10/28 16:18:01 runner 6 connected 2025/10/28 16:18:03 runner 2 connected 2025/10/28 16:18:04 runner 1 connected 2025/10/28 16:18:04 runner 1 connected 2025/10/28 16:18:15 runner 4 connected 2025/10/28 16:19:01 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:19:57 runner 4 connected 2025/10/28 16:20:09 STAT { "buffer too small": 0, "candidate triage jobs": 3, "candidates": 15515, "comps overflows": 0, "corpus": 46080, "corpus [files]": 18704, "corpus [symbols]": 19374, "cover overflows": 47881, "coverage": 305331, "distributor delayed": 55522, "distributor undelayed": 55522, "distributor violated": 699, "exec candidate": 65722, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 25, "exec seeds": 0, "exec smash": 0, "exec total [base]": 136698, "exec total [new]": 321485, "exec triage": 144481, "executor restarts [base]": 329, "executor restarts [new]": 1168, "fault jobs": 0, "fuzzer jobs": 3, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 308033, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46823, "no exec duration": 50193000000, "no exec requests": 382, "pending": 112, "prog exec time": 227, "reproducing": 0, "rpc recv": 14923737324, "rpc sent": 2133749400, "signal": 300853, "smash jobs": 0, "triage jobs": 0, "vm output": 42837382, "vm restarts [base]": 36, "vm restarts [new]": 195 } 2025/10/28 16:20:26 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:21:25 runner 8 connected 2025/10/28 16:21:39 triaged 92.1% of the corpus 2025/10/28 16:21:39 
starting bug reproductions 2025/10/28 16:21:39 starting bug reproductions (max 6 VMs, 4 repros) 2025/10/28 16:21:39 reproduction of "unregister_netdevice: waiting for DEV to become free" aborted: it's no longer needed 2025/10/28 16:21:39 start reproducing 'WARNING in folio_memcg' 2025/10/28 16:21:39 start reproducing 'INFO: task hung in addrconf_verify_work' 2025/10/28 16:21:39 start reproducing 'INFO: trying to register non-static key in ocfs2_dlm_shutdown' 2025/10/28 16:21:39 start reproducing 'possible deadlock in ocfs2_reserve_suballoc_bits' 2025/10/28 16:22:54 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:23:24 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:24:26 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:24:55 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/10/28 16:24:55 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored 2025/10/28 16:24:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 16:24:58 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:25:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 0, "corpus": 46147, "corpus [files]": 18739, "corpus [symbols]": 19408, "cover overflows": 51356, "coverage": 305483, "distributor delayed": 55669, "distributor undelayed": 55666, "distributor violated": 707, "exec candidate": 81237, "exec collide": 171, "exec fuzz": 297, "exec gen": 20, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 25, "exec seeds": 0, "exec smash": 0, "exec total [base]": 148055, "exec total [new]": 337912, "exec triage": 144905, "executor restarts [base]": 344, "executor restarts [new]": 1187, "fault jobs": 0, "fuzzer jobs": 5, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 2, "hints jobs": 0, "max signal": 308297, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46934, "no exec duration": 51213000000, "no exec requests": 389, "pending": 107, "prog exec time": 492, "reproducing": 4, "rpc recv": 15204688704, "rpc sent": 2228788248, "signal": 300973, "smash jobs": 0, "triage jobs": 5, "vm output": 44619272, "vm restarts [base]": 36, "vm restarts [new]": 196 } 2025/10/28 16:25:44 runner 6 connected 2025/10/28 16:25:54 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:25:59 patched 
crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:25:59 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:26:26 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:26:55 runner 8 connected 2025/10/28 16:27:12 base crash: lost connection to test machine 2025/10/28 16:27:57 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:28:09 runner 2 connected 2025/10/28 16:28:47 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:29:08 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:29:08 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:29:17 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:29:18 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:29:18 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:29:57 runner 7 connected 2025/10/28 16:30:05 runner 8 connected 2025/10/28 16:30:09 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:30:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 28, "corpus": 46172, "corpus [files]": 18749, "corpus [symbols]": 19421, "cover overflows": 52990, "coverage": 305526, "distributor delayed": 55782, "distributor undelayed": 55772, "distributor violated": 707, "exec candidate": 81237, "exec collide": 1004, "exec fuzz": 1919, "exec gen": 90, "exec hints": 575, "exec inject": 0, "exec minimize": 732, "exec retries": 26, "exec seeds": 87, "exec smash": 608, "exec total [base]": 155482, "exec total [new]": 342637, "exec triage": 145102, "executor restarts [base]": 356, "executor restarts [new]": 1215, "fault jobs": 0, "fuzzer jobs": 34, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 2, "hints jobs": 10, "max signal": 308560, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 407, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47005, "no exec duration": 54640000000, "no exec requests": 397, "pending": 110, "prog exec time": 594, "reproducing": 4, "rpc recv": 15614929660, "rpc sent": 2418711120, "signal": 301012, "smash jobs": 9, "triage jobs": 15, "vm output": 47387429, "vm restarts [base]": 37, "vm restarts [new]": 200 } 2025/10/28 16:30:39 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 
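The run above is line-oriented and easy to post-process: each STAT record is a single JSON object following the timestamp and the token STAT, and each patched-side crash is reported as "patched crashed: <title> [need repro = true|false]". The sketch below is a minimal, hypothetical helper, not part of syzkaller itself (the file name syz-diff.log, the script name, and the choice of fields are my assumptions); it extracts the coverage/corpus trajectory from the STAT records and tallies crash titles, which is one way to make the dominant 'WARNING in folio_memcg' signal stand out from incidental crashes such as 'kernel BUG in txUnlock'.

```python
#!/usr/bin/env python3
"""summarize_log.py - hypothetical summary of a syz-diff style log.

Assumptions (mine, not guaranteed by the tool): the log is saved
one entry per line, STAT entries look like
'2025/10/28 15:15:09 STAT { ... }' with valid JSON after 'STAT ',
and patched-side crashes look like
'... patched crashed: TITLE [need repro = true|false]'.
"""
import json
import re
import sys
from collections import Counter

STAT_RE = re.compile(r"^(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}) STAT (\{.*\})\s*$")
CRASH_RE = re.compile(r"patched crashed: (.+?) \[need repro = (?:true|false)\]")

def main(path):
    stats = []          # (timestamp, parsed STAT dict) in log order
    crashes = Counter() # patched crash title -> occurrence count
    with open(path) as f:
        for line in f:
            m = STAT_RE.match(line.rstrip("\n"))
            if m:
                ts, blob = m.groups()
                stats.append((ts, json.loads(blob)))
                continue
            c = CRASH_RE.search(line)
            if c:
                crashes[c.group(1)] += 1

    # Coverage/corpus trajectory, one row per STAT record.
    for ts, s in stats:
        print(f"{ts} coverage={s['coverage']} corpus={s['corpus']} "
              f"pending={s['pending']} candidates={s['candidates']}")

    print("\npatched crash titles by frequency:")
    for title, n in crashes.most_common():
        print(f"{n:5d}  {title}")

if __name__ == "__main__":
    main(sys.argv[1] if len(sys.argv) > 1 else "syz-diff.log")

Run as python3 summarize_log.py syz-diff.log; on a log like this one it should show coverage climbing (here from 225300 to 305526 between the first and last STAT above) while 'WARNING in folio_memcg' accounts for the bulk of the patched-only crashes.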
2025/10/28 16:31:32 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:32:03 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:32:47 base crash: INFO: task hung in __iterate_supers 2025/10/28 16:32:48 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:33:22 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:33:37 runner 2 connected 2025/10/28 16:34:00 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:34:01 repro finished 'possible deadlock in ocfs2_reserve_suballoc_bits', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/10/28 16:34:01 failed repro for "possible deadlock in ocfs2_reserve_suballoc_bits", err=%!s() 2025/10/28 16:34:01 "possible deadlock in ocfs2_reserve_suballoc_bits": saved crash log into 1761669241.crash.log 2025/10/28 16:34:01 "possible deadlock in ocfs2_reserve_suballoc_bits": saved repro log into 1761669241.repro.log 2025/10/28 16:34:11 runner 0 connected 2025/10/28 16:34:29 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:35:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 75, "corpus": 46233, "corpus [files]": 18776, "corpus [symbols]": 19446, "cover overflows": 54194, "coverage": 305685, "distributor delayed": 55927, "distributor undelayed": 55926, "distributor violated": 707, "exec candidate": 81237, "exec collide": 1611, "exec fuzz": 3016, "exec gen": 147, "exec hints": 1232, "exec inject": 0, "exec minimize": 1790, "exec retries": 27, "exec seeds": 255, "exec smash": 1544, "exec total [base]": 160449, "exec total [new]": 347523, "exec triage": 145400, "executor restarts [base]": 372, "executor restarts [new]": 1242, "fault jobs": 0, "fuzzer jobs": 99, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 40, "max signal": 308788, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 989, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47105, "no exec duration": 162988000000, "no exec requests": 730, "pending": 110, "prog exec time": 369, "reproducing": 3, "rpc recv": 16004427340, "rpc sent": 2610390096, "signal": 301160, "smash jobs": 51, "triage jobs": 8, "vm output": 50265541, "vm restarts [base]": 38, "vm restarts [new]": 201 } 2025/10/28 16:35:10 
patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 16:35:10 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 16:35:22 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:35:33 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/10/28 16:35:33 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored 2025/10/28 16:35:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 16:35:47 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:35:59 runner 0 connected 2025/10/28 16:36:00 base crash: lost connection to test machine 2025/10/28 16:36:15 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:36:23 runner 7 connected 2025/10/28 16:36:47 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:36:56 runner 2 connected 2025/10/28 16:36:59 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:37:03 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:37:13 patched crashed: INFO: task hung in __iterate_supers [need repro = false] 2025/10/28 16:37:28 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:37:31 base crash: lost connection to test machine 2025/10/28 16:37:32 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:37:35 runner 8 connected 2025/10/28 16:37:47 runner 7 connected 2025/10/28 16:38:01 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:38:02 runner 6 connected 2025/10/28 16:38:09 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:38:18 runner 0 connected 2025/10/28 16:38:19 patched crashed: lost connection to test machine [need repro = false] 2025/10/28 16:38:20 runner 2 connected 2025/10/28 16:38:50 runner 1 connected 2025/10/28 16:38:58 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:39:02 base crash: kernel BUG in jfs_evict_inode 2025/10/28 16:39:03 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/10/28 16:39:05 runner 8 connected 2025/10/28 16:39:08 runner 7 connected 2025/10/28 16:39:22 patched 
2025/10/28 16:39:22 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 16:39:24 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:39:58 runner 2 connected
2025/10/28 16:39:59 runner 0 connected
2025/10/28 16:40:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 89, "corpus": 46252, "corpus [files]": 18781, "corpus [symbols]": 19449, "cover overflows": 54774, "coverage": 305730, "distributor delayed": 56026, "distributor undelayed": 56025, "distributor violated": 707, "exec candidate": 81237, "exec collide": 1991, "exec fuzz": 3710, "exec gen": 180, "exec hints": 1632, "exec inject": 0, "exec minimize": 2139, "exec retries": 27, "exec seeds": 323, "exec smash": 2187, "exec total [base]": 163112, "exec total [new]": 350209, "exec triage": 145512, "executor restarts [base]": 386, "executor restarts [new]": 1279, "fault jobs": 0, "fuzzer jobs": 85, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 33, "max signal": 308892, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 1240, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47145, "no exec duration": 194533000000, "no exec requests": 838, "pending": 111, "prog exec time": 512, "reproducing": 3, "rpc recv": 16537966876, "rpc sent": 2721376152, "signal": 301197, "smash jobs": 43, "triage jobs": 9, "vm output": 53177478, "vm restarts [base]": 41, "vm restarts [new]": 211 }
2025/10/28 16:40:18 runner 1 connected
2025/10/28 16:40:21 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:40:41 base crash: WARNING in xfrm6_tunnel_net_exit
2025/10/28 16:40:53 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:41:37 runner 1 connected
2025/10/28 16:41:47 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:41:53 base crash: lost connection to test machine
2025/10/28 16:42:39 crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known
2025/10/28 16:42:39 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is to be ignored
2025/10/28 16:42:39 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 16:42:43 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 16:42:49 runner 0 connected
2025/10/28 16:43:14 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:43:28 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false]
2025/10/28 16:43:30 runner 1 connected
2025/10/28 16:43:39 runner 1 connected
2025/10/28 16:44:07 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 16:44:07 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 16:44:24 runner 0 connected
2025/10/28 16:44:27 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:44:32 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 16:44:32 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 16:44:38 base crash: lost connection to test machine
2025/10/28 16:44:51 base crash: lost connection to test machine
2025/10/28 16:44:55 runner 7 connected
2025/10/28 16:44:55 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:44:55 repro finished 'WARNING in folio_memcg', repro=true crepro=false desc='WARNING in folio_memcg' hub=false from_dashboard=false
2025/10/28 16:44:55 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 23.26 minutes
2025/10/28 16:44:55 "WARNING in folio_memcg": saved crash log into 1761669895.crash.log
2025/10/28 16:44:55 "WARNING in folio_memcg": saved repro log into 1761669895.repro.log
2025/10/28 16:44:55 start reproducing 'WARNING in folio_memcg'
2025/10/28 16:45:04 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 16:45:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 110, "corpus": 46285, "corpus [files]": 18793, "corpus [symbols]": 19458, "cover overflows": 56227, "coverage": 305779, "distributor delayed": 56204, "distributor undelayed": 56203, "distributor violated": 707, "exec candidate": 81237, "exec collide": 2766, "exec fuzz": 5174, "exec gen": 252, "exec hints": 2703, "exec inject": 0, "exec minimize": 3085, "exec retries": 28, "exec seeds": 424, "exec smash": 3322, "exec total [base]": 165945, "exec total [new]": 356015, "exec triage": 145747, "executor restarts [base]": 432, "executor restarts [new]": 1370, "fault jobs": 0, "fuzzer jobs": 49, "fuzzing VMs [base]": 0, "fuzzing VMs [new]": 2, "hints jobs": 22, "max signal": 309123, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 1799, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47230, "no exec duration": 200511000000, "no exec requests": 850, "pending": 112, "prog exec time": 615, "reproducing": 3, "rpc recv": 16935993884, "rpc sent": 2885299200, "signal": 301243, "smash jobs": 13, "triage jobs": 14, "vm output": 56937802, "vm restarts [base]": 44, "vm restarts [new]": 215 }
2025/10/28 16:45:21 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 16:45:27 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 16:45:27 runner 2 connected
2025/10/28 16:45:28 runner 1 connected
2025/10/28 16:45:40 runner 1 connected
2025/10/28 16:45:43 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:45:50 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 16:45:52 runner 0 connected
2025/10/28 16:45:53 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 16:46:01 base crash: lost connection to test machine
2025/10/28 16:46:10 runner 7 connected
2025/10/28 16:46:11 base crash: lost connection to test machine
2025/10/28 16:46:14 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:46:15 runner 6 connected
2025/10/28 16:46:42 runner 8 connected
2025/10/28 16:46:46 runner 1 connected
2025/10/28 16:46:47 attempt #0 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 16:46:51 runner 2 connected
2025/10/28 16:46:58 repro finished 'INFO: trying to register non-static key in ocfs2_dlm_shutdown', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/10/28 16:46:58 failed repro for "INFO: trying to register non-static key in ocfs2_dlm_shutdown", err=%!s()
2025/10/28 16:46:58 "INFO: trying to register non-static key in ocfs2_dlm_shutdown": saved crash log into 1761670018.crash.log
2025/10/28 16:46:58 "INFO: trying to register non-static key in ocfs2_dlm_shutdown": saved repro log into 1761670018.repro.log
2025/10/28 16:47:01 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:47:01 runner 1 connected
2025/10/28 16:47:36 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:48:17 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:48:45 attempt #1 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 16:48:46 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:49:41 runner 2 connected
"minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47296, "no exec duration": 200761000000, "no exec requests": 853, "pending": 112, "prog exec time": 502, "reproducing": 2, "rpc recv": 17407872024, "rpc sent": 3035372768, "signal": 301277, "smash jobs": 3, "triage jobs": 3, "vm output": 60070528, "vm restarts [base]": 48, "vm restarts [new]": 222 } 2025/10/28 16:50:20 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:50:22 base crash: lost connection to test machine 2025/10/28 16:50:38 attempt #2 to run "WARNING in folio_memcg" on base: did not crash 2025/10/28 16:50:39 patched-only: WARNING in folio_memcg 2025/10/28 16:50:39 scheduled a reproduction of 'WARNING in folio_memcg (full)' 2025/10/28 16:50:39 start reproducing 'WARNING in folio_memcg (full)' 2025/10/28 16:51:15 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:51:19 runner 2 connected 2025/10/28 16:51:20 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/10/28 16:51:29 runner 0 connected 2025/10/28 16:51:41 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:51:58 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:52:09 runner 2 connected 2025/10/28 16:53:03 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:53:24 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 16:53:34 base crash: lost connection to test machine 2025/10/28 16:53:48 patched crashed: WARNING in folio_memcg [need repro = false] 2025/10/28 16:53:56 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:54:03 crash "possible deadlock in ocfs2_init_acl" is already known 2025/10/28 16:54:03 base crash "possible deadlock in ocfs2_init_acl" is to be ignored 2025/10/28 16:54:03 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/10/28 16:54:14 runner 2 connected 2025/10/28 16:54:20 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 16:54:23 runner 1 connected 2025/10/28 16:54:45 runner 6 connected 2025/10/28 16:54:53 runner 7 connected 2025/10/28 16:55:06 reproducing crash 'no output/lost 
2025/10/28 16:55:06 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:55:07 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:55:09 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 141, "corpus": 46330, "corpus [files]": 18806, "corpus [symbols]": 19471, "cover overflows": 59245, "coverage": 305840, "distributor delayed": 56443, "distributor undelayed": 56443, "distributor violated": 711, "exec candidate": 81237, "exec collide": 5136, "exec fuzz": 9608, "exec gen": 482, "exec hints": 6507, "exec inject": 0, "exec minimize": 4331, "exec retries": 30, "exec seeds": 546, "exec smash": 4496, "exec total [base]": 172145, "exec total [new]": 369822, "exec triage": 146158, "executor restarts [base]": 499, "executor restarts [new]": 1542, "fault jobs": 0, "fuzzer jobs": 22, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 10, "max signal": 309375, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 2608, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47371, "no exec duration": 200872000000, "no exec requests": 856, "pending": 112, "prog exec time": 376, "reproducing": 3, "rpc recv": 17831054296, "rpc sent": 3247397072, "signal": 301306, "smash jobs": 2, "triage jobs": 10, "vm output": 62960141, "vm restarts [base]": 51, "vm restarts [new]": 226 }
2025/10/28 16:55:14 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 16:55:26 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:55:49 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 16:56:03 runner 1 connected
2025/10/28 16:56:20 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:56:32 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:56:39 runner 0 connected
2025/10/28 16:57:25 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:57:27 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:57:38 patched crashed: WARNING in folio_memcg [need repro = false]
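Note on the STAT records: each one is a single-line JSON object appended after the timestamp, so the run can be monitored mechanically. A small sketch of scraping a few counters out of one line (format inferred from the records above, not an official schema):

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    func main() {
        line := `2025/10/28 16:55:09 STAT { "corpus": 46330, "vm restarts [base]": 51, "vm restarts [new]": 226 }`
        // Everything from the first '{' onward parses as JSON; all values are integers.
        stats := make(map[string]int64)
        if err := json.Unmarshal([]byte(line[strings.Index(line, "{"):]), &stats); err != nil {
            panic(err)
        }
        fmt.Println(stats["vm restarts [new]"]) // 226
    }

Comparing the "vm restarts [new]" counter across successive STAT lines, for instance, shows how much more often the patched VMs are being recycled than the base VMs over this window.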
2025/10/28 16:57:41 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:57:47 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 16:57:49 crash "possible deadlock in ocfs2_init_acl" is already known
2025/10/28 16:57:49 base crash "possible deadlock in ocfs2_init_acl" is to be ignored
2025/10/28 16:57:49 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 16:58:27 runner 7 connected
2025/10/28 16:58:32 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:58:36 runner 2 connected
2025/10/28 16:58:38 runner 1 connected
2025/10/28 16:58:43 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:58:47 crash "possible deadlock in ocfs2_init_acl" is already known
2025/10/28 16:58:47 base crash "possible deadlock in ocfs2_init_acl" is to be ignored
2025/10/28 16:58:47 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 16:58:48 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:58:54 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 16:58:59 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 16:59:02 crash "possible deadlock in ocfs2_init_acl" is already known
2025/10/28 16:59:02 base crash "possible deadlock in ocfs2_init_acl" is to be ignored
2025/10/28 16:59:02 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 16:59:11 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 16:59:21 base crash: possible deadlock in ntfs_fiemap
2025/10/28 16:59:37 runner 7 connected
2025/10/28 16:59:43 runner 6 connected
2025/10/28 16:59:47 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:59:48 runner 8 connected
2025/10/28 16:59:49 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 16:59:51 runner 2 connected
2025/10/28 17:00:00 runner 1 connected
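Note on the recurring triple of lines, crash X is already known / base crash X is to be ignored / patched crashed: X [need repro = false]: it suggests a straightforward suppression set, where titles already attributed to the base kernel do not earn reproduction time on the patched side. A hedged sketch of that bookkeeping, with invented names:

    package main

    import "log"

    // triage keeps the set of crash titles already attributed to the base
    // kernel; such titles are not worth reproducing on the patched kernel.
    type triage struct {
        knownBaseCrashes map[string]bool
    }

    // onPatchedCrash returns true when a reproduction should be scheduled.
    func (t *triage) onPatchedCrash(title string) bool {
        if t.knownBaseCrashes[title] {
            log.Printf("crash %q is already known", title)
            log.Printf("base crash %q is to be ignored", title)
            log.Printf("patched crashed: %s [need repro = false]", title)
            return false
        }
        log.Printf("patched crashed: %s [need repro = true]", title)
        log.Printf("scheduled a reproduction of '%s'", title)
        return true
    }

    func main() {
        t := &triage{knownBaseCrashes: map[string]bool{"possible deadlock in ocfs2_init_acl": true}}
        t.onPatchedCrash("possible deadlock in ocfs2_init_acl")
        t.onPatchedCrash("WARNING in folio_memcg")
    }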
seeds": 642, "exec smash": 5089, "exec total [base]": 176234, "exec total [new]": 374441, "exec triage": 146316, "executor restarts [base]": 549, "executor restarts [new]": 1628, "fault jobs": 0, "fuzzer jobs": 20, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 5, "hints jobs": 10, "max signal": 309508, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 3023, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47428, "no exec duration": 201202000000, "no exec requests": 859, "pending": 112, "prog exec time": 479, "reproducing": 3, "rpc recv": 18344524296, "rpc sent": 3414411432, "signal": 301367, "smash jobs": 4, "triage jobs": 6, "vm output": 65431392, "vm restarts [base]": 52, "vm restarts [new]": 235 } 2025/10/28 17:00:10 runner 2 connected 2025/10/28 17:00:34 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 17:00:43 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:00:52 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:01:14 base crash: lost connection to test machine 2025/10/28 17:01:14 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:01:24 runner 1 connected 2025/10/28 17:01:36 base crash: WARNING in xfrm_state_fini 2025/10/28 17:01:49 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:02:03 runner 2 connected 2025/10/28 17:02:04 patched crashed: WARNING in folio_memcg [need repro = false] 2025/10/28 17:02:05 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 17:02:10 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:02:27 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:02:33 runner 0 connected 2025/10/28 17:03:00 runner 7 connected 2025/10/28 17:03:02 runner 1 connected 2025/10/28 17:03:12 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:03:13 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec 
2025/10/28 17:03:13 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:03:49 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:04:25 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:04:37 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 17:04:41 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:05:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 210, "corpus": 46400, "corpus [files]": 18843, "corpus [symbols]": 19507, "cover overflows": 62356, "coverage": 305973, "distributor delayed": 56705, "distributor undelayed": 56705, "distributor violated": 718, "exec candidate": 81237, "exec collide": 6908, "exec fuzz": 12881, "exec gen": 634, "exec hints": 9323, "exec inject": 0, "exec minimize": 6041, "exec retries": 32, "exec seeds": 747, "exec smash": 6040, "exec total [base]": 181028, "exec total [new]": 381675, "exec triage": 146533, "executor restarts [base]": 573, "executor restarts [new]": 1650, "fault jobs": 0, "fuzzer jobs": 20, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 9, "max signal": 309637, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 3536, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47500, "no exec duration": 202849000000, "no exec requests": 870, "pending": 112, "prog exec time": 427, "reproducing": 3, "rpc recv": 18779732756, "rpc sent": 3631170280, "signal": 301432, "smash jobs": 4, "triage jobs": 7, "vm output": 69728196, "vm restarts [base]": 55, "vm restarts [new]": 238 }
2025/10/28 17:05:31 base crash: possible deadlock in ocfs2_init_acl
2025/10/28 17:05:33 runner 1 connected
2025/10/28 17:05:38 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 17:05:46 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:06:06 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:06:13 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:06:20 runner 0 connected
2025/10/28 17:06:27 runner 7 connected
2025/10/28 17:07:11 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:07:12 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:07:29 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:07:44 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:07:44 repro finished 'WARNING in folio_memcg (full)', repro=true crepro=true desc='WARNING in folio_memcg' hub=false from_dashboard=false
2025/10/28 17:07:44 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 17.09 minutes
2025/10/28 17:07:44 "WARNING in folio_memcg": saved crash log into 1761671264.crash.log
2025/10/28 17:07:44 "WARNING in folio_memcg": saved repro log into 1761671264.repro.log
2025/10/28 17:08:01 runner 0 connected
2025/10/28 17:08:07 runner 1 connected
2025/10/28 17:08:44 patched crashed: kernel BUG in txUnlock [need repro = false]
2025/10/28 17:08:47 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 17:08:48 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:08:50 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:08:57 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 17:08:57 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 17:09:21 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:09:33 runner 7 connected
2025/10/28 17:09:43 attempt #0 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:09:44 runner 6 connected
2025/10/28 17:09:44 runner 1 connected
2025/10/28 17:09:46 runner 2 connected
2025/10/28 17:10:09 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
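Note on the "repro finished" lines: the (full) attempt at 17:07:44 finished with repro=true crepro=true, i.e. this time a standalone C reproducer was extracted as well, whereas the earlier attempts stopped at a syz program (crepro=false). Reading those fields back into a record might look like the sketch below; the field names mirror the log output, but the struct itself is illustrative, not the tool's internal type:

    package main

    import (
        "fmt"
        "time"
    )

    // reproResult mirrors the fields printed on the "repro finished" and
    // "found repro" lines; illustrative only.
    type reproResult struct {
        Title         string        // crash title, e.g. "WARNING in folio_memcg"
        Repro         bool          // a syz-program reproducer was found
        CRepro        bool          // it was also converted to a standalone C reproducer
        Hub           bool
        FromDashboard bool
        Reliability   float64       // 1 means every replay triggered the crash
        Took          time.Duration
    }

    func main() {
        r := reproResult{Title: "WARNING in folio_memcg", Repro: true, CRepro: true,
            Reliability: 1, Took: time.Duration(17.09 * float64(time.Minute))}
        fmt.Printf("found repro for %q (reliability: %v), took %.2f minutes\n",
            r.Title, r.Reliability, r.Took.Minutes())
    }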
integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47563, "no exec duration": 204567000000, "no exec requests": 881, "pending": 113, "prog exec time": 620, "reproducing": 2, "rpc recv": 19274593148, "rpc sent": 3855217720, "signal": 301482, "smash jobs": 2, "triage jobs": 13, "vm output": 72293794, "vm restarts [base]": 57, "vm restarts [new]": 245 } 2025/10/28 17:10:13 crash "kernel BUG in ocfs2_write_cluster_by_desc" is already known 2025/10/28 17:10:13 base crash "kernel BUG in ocfs2_write_cluster_by_desc" is to be ignored 2025/10/28 17:10:13 patched crashed: kernel BUG in ocfs2_write_cluster_by_desc [need repro = false] 2025/10/28 17:10:19 patched crashed: WARNING in folio_memcg [need repro = true] 2025/10/28 17:10:19 scheduled a reproduction of 'WARNING in folio_memcg' 2025/10/28 17:10:28 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/10/28 17:10:35 patched crashed: INFO: task hung in __iterate_supers [need repro = false] 2025/10/28 17:11:09 runner 1 connected 2025/10/28 17:11:16 runner 6 connected 2025/10/28 17:11:24 runner 8 connected 2025/10/28 17:11:25 runner 0 connected 2025/10/28 17:11:34 attempt #1 to run "WARNING in folio_memcg" on base: did not crash 2025/10/28 17:11:53 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:12:22 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:13:19 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:13:19 repro finished 'WARNING in folio_memcg', repro=true crepro=false desc='WARNING in folio_memcg' hub=false from_dashboard=false 2025/10/28 17:13:19 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 28.38 minutes 2025/10/28 17:13:19 "WARNING in folio_memcg": saved crash log into 1761671599.crash.log 2025/10/28 17:13:19 "WARNING in folio_memcg": saved repro log into 1761671599.repro.log 2025/10/28 17:13:19 start reproducing 'WARNING in folio_memcg' 2025/10/28 17:13:33 attempt #2 to run "WARNING in folio_memcg" on base: did not crash 2025/10/28 17:13:33 patched-only: WARNING in folio_memcg 2025/10/28 17:13:46 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:14:30 runner 0 connected 2025/10/28 17:14:43 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:14:58 patched crashed: INFO: task hung in corrupted [need repro = false] 2025/10/28 17:15:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 270, "corpus": 46452, "corpus [files]": 
2025/10/28 17:15:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 270, "corpus": 46452, "corpus [files]": 18858, "cover overflows": 65520, "coverage": 306065, "distributor delayed": 56917, "distributor undelayed": 56917, "distributor violated": 722, "exec candidate": 81237, "exec collide": 9153, "exec fuzz": 17249, "exec gen": 835, "exec hints": 11377, "exec inject": 0, "exec minimize": 7763, "exec retries": 34, "exec seeds": 902, "exec smash": 7175, "exec total [base]": 188644, "exec total [new]": 393932, "exec triage": 146905, "executor restarts [base]": 607, "executor restarts [new]": 1764, "fault jobs": 0, "fuzzer jobs": 35, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 5, "hints jobs": 9, "max signal": 309900, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4539, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47629, "no exec duration": 207634000000, "no exec requests": 885, "pending": 113, "prog exec time": 602, "reproducing": 2, "rpc recv": 19622982600, "rpc sent": 4057937368, "signal": 301518, "smash jobs": 12, "triage jobs": 14, "vm output": 75974137, "vm restarts [base]": 58, "vm restarts [new]": 249 }
2025/10/28 17:15:13 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:15:18 attempt #0 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:15:31 patched crashed: WARNING in driver_unregister [need repro = true]
2025/10/28 17:15:31 scheduled a reproduction of 'WARNING in driver_unregister'
2025/10/28 17:15:31 start reproducing 'WARNING in driver_unregister'
2025/10/28 17:15:44 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 17:15:54 runner 7 connected
2025/10/28 17:16:07 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:16:20 runner 8 connected
2025/10/28 17:16:35 runner 6 connected
2025/10/28 17:16:56 patched crashed: kernel BUG in ocfs2_set_new_buffer_uptodate [need repro = true]
2025/10/28 17:16:56 scheduled a reproduction of 'kernel BUG in ocfs2_set_new_buffer_uptodate'
2025/10/28 17:16:56 start reproducing 'kernel BUG in ocfs2_set_new_buffer_uptodate'
2025/10/28 17:17:09 attempt #1 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:17:29 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:17:52 runner 8 connected
2025/10/28 17:17:58 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:18:02 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 17:18:12 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:18:51 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:18:51 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:18:59 runner 6 connected
2025/10/28 17:19:00 attempt #2 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:19:00 patched-only: WARNING in folio_memcg
2025/10/28 17:19:00 scheduled a reproduction of 'WARNING in folio_memcg (full)'
2025/10/28 17:19:39 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:19:40 runner 0 connected
2025/10/28 17:19:57 runner 1 connected
2025/10/28 17:20:01 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:20:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 280, "corpus": 46466, "corpus [files]": 18864, "corpus [symbols]": 19530, "cover overflows": 66290, "coverage": 306103, "distributor delayed": 56989, "distributor undelayed": 56989, "distributor violated": 725, "exec candidate": 81237, "exec collide": 9587, "exec fuzz": 18107, "exec gen": 883, "exec hints": 12073, "exec inject": 0, "exec minimize": 8346, "exec retries": 34, "exec seeds": 934, "exec smash": 7700, "exec total [base]": 192111, "exec total [new]": 397216, "exec triage": 147006, "executor restarts [base]": 624, "executor restarts [new]": 1804, "fault jobs": 0, "fuzzer jobs": 18, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 5, "max signal": 310364, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4928, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47664, "no exec duration": 207801000000, "no exec requests": 886, "pending": 113, "prog exec time": 810, "reproducing": 4, "rpc recv": 20023887832, "rpc sent": 4201569904, "signal": 301547, "smash jobs": 3, "triage jobs": 10, "vm output": 79810713, "vm restarts [base]": 60, "vm restarts [new]": 254 }
2025/10/28 17:20:10 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:20:50 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:21:00 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:21:19 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:21:19 repro finished 'WARNING in folio_memcg', repro=true crepro=false desc='WARNING in folio_memcg' hub=false from_dashboard=false
2025/10/28 17:21:19 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 8.01 minutes
2025/10/28 17:21:19 start reproducing 'WARNING in folio_memcg (full)'
2025/10/28 17:21:19 "WARNING in folio_memcg": saved crash log into 1761672079.crash.log
2025/10/28 17:21:19 "WARNING in folio_memcg": saved repro log into 1761672079.repro.log
2025/10/28 17:21:19 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed
(previous message repeated ~110 times between 17:21:19 and 17:21:21 as the remaining queued reproductions of "WARNING in folio_memcg" were cancelled)
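Note on the burst of "aborted: it's no longer needed" lines: once a reproducer for a title lands, every queued duplicate of that title is drained, one log line per dropped job. A toy sketch of that cancellation pass (invented names, not the tool's real queue type):

    package main

    import "log"

    type reproJob struct{ title string }

    // cancelObsolete drains queued jobs whose crash title already has a
    // finished reproducer, emitting one "aborted" line per dropped job.
    func cancelObsolete(queue []reproJob, done map[string]bool) []reproJob {
        kept := queue[:0]
        for _, job := range queue {
            if done[job.title] {
                log.Printf("reproduction of %q aborted: it's no longer needed", job.title)
                continue
            }
            kept = append(kept, job)
        }
        return kept
    }

    func main() {
        queue := []reproJob{{"WARNING in folio_memcg"}, {"WARNING in driver_unregister"}}
        queue = cancelObsolete(queue, map[string]bool{"WARNING in folio_memcg": true})
        log.Printf("%d job(s) still pending", len(queue))
    }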
reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:20 reproduction of "WARNING in folio_memcg" aborted: it's no 
in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:21 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:21 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:21 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:21 reproduction of "WARNING in folio_memcg" aborted: it's no longer needed 2025/10/28 17:21:28 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:22:08 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:22:33 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:22:49 base crash: kernel BUG in txUnlock 2025/10/28 17:23:10 attempt #0 to run "WARNING in folio_memcg" on base: did not crash 2025/10/28 17:23:18 patched crashed: WARNING in folio_memcg [need repro = false] 2025/10/28 17:23:29 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:23:38 runner 1 connected 2025/10/28 17:23:56 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:24:07 runner 7 connected 2025/10/28 17:24:08 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:24:50 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:24:52 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/10/28 17:25:01 attempt #1 to run "WARNING in folio_memcg" on base: did not crash 2025/10/28 17:25:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 316, "corpus": 46489, "corpus [files]": 18879, "corpus [symbols]": 19543, "cover overflows": 67243, "coverage": 306163, "distributor delayed": 57035, "distributor undelayed": 57035, "distributor violated": 725, "exec candidate": 81237, "exec collide": 10151, "exec fuzz": 19156, "exec gen": 937, "exec hints": 12541, "exec inject": 0, "exec minimize": 8885, "exec retries": 35, "exec seeds": 
2025/10/28 17:25:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 316, "corpus": 46489, "corpus [files]": 18879, "corpus [symbols]": 19543, "cover overflows": 67243, "coverage": 306163, "distributor delayed": 57035, "distributor undelayed": 57035, "distributor violated": 725, "exec candidate": 81237, "exec collide": 10151, "exec fuzz": 19156, "exec gen": 937, "exec hints": 12541, "exec inject": 0, "exec minimize": 8885, "exec retries": 35, "exec seeds": 988, "exec smash": 8261, "exec total [base]": 195558, "exec total [new]": 400624, "exec triage": 147124, "executor restarts [base]": 642, "executor restarts [new]": 1824, "fault jobs": 0, "fuzzer jobs": 9, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 4, "max signal": 310492, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5179, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47701, "no exec duration": 207801000000, "no exec requests": 886, "pending": 0, "prog exec time": 772, "reproducing": 4, "rpc recv": 20284739880, "rpc sent": 4345421616, "signal": 301577, "smash jobs": 1, "triage jobs": 4, "vm output": 83212805, "vm restarts [base]": 61, "vm restarts [new]": 255 }
2025/10/28 17:25:16 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:25:20 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:25:26 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:26:01 patched crashed: kernel BUG in jfs_evict_inode [need repro = false]
2025/10/28 17:26:08 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:26:29 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:26:45 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:26:53 attempt #2 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:26:53 patched-only: WARNING in folio_memcg
2025/10/28 17:26:53 scheduled a reproduction of 'WARNING in folio_memcg (full)'
2025/10/28 17:26:58 runner 6 connected
2025/10/28 17:27:04 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:27:13 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:27:42 runner 0 connected
2025/10/28 17:27:51 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:28:01 runner 7 connected
2025/10/28 17:28:15 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:28:40 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:28:46 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:29:14 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:29:19 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:29:36 runner 6 connected
2025/10/28 17:29:36 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 17:29:42 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:29:46 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:30:03 runner 0 connected
2025/10/28 17:30:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 328, "corpus": 46498, "corpus [files]": 18883, "corpus [symbols]": 19548, "cover overflows": 68220, "coverage": 306201, "distributor delayed": 57119, "distributor undelayed": 57107, "distributor violated": 725, "exec candidate": 81237, "exec collide": 10940, "exec fuzz": 20688, "exec gen": 1022, "exec hints": 12975, "exec inject": 0, "exec minimize": 9245, "exec retries": 35, "exec seeds": 1015, "exec smash": 8461, "exec total [base]": 200027, "exec total [new]": 404167, "exec triage": 147242, "executor restarts [base]": 652, "executor restarts [new]": 1845, "fault jobs": 0, "fuzzer jobs": 17, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 2, "hints jobs": 2, "max signal": 310628, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5352, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47749, "no exec duration": 208323000000, "no exec requests": 889, "pending": 1, "prog exec time": 382, "reproducing": 4, "rpc recv": 20625151832, "rpc sent": 4527134168, "signal": 301596, "smash jobs": 0, "triage jobs": 15, "vm output": 85450340, "vm restarts [base]": 63, "vm restarts [new]": 258 }
2025/10/28 17:30:25 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:30:25 runner 7 connected
2025/10/28 17:30:32 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:30:47 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:31:02 reproducing crash 'kernel BUG in ocfs2_set_new_buffer_uptodate': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/uptodate.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:31:02 repro finished 'kernel BUG in ocfs2_set_new_buffer_uptodate', repro=true crepro=false desc='kernel BUG in ocfs2_set_new_buffer_uptodate' hub=false from_dashboard=false
2025/10/28 17:31:02 found repro for "kernel BUG in ocfs2_set_new_buffer_uptodate" (orig title: "-SAME-", reliability: 1), took 13.50 minutes
2025/10/28 17:31:02 "kernel BUG in ocfs2_set_new_buffer_uptodate": saved crash log into 1761672662.crash.log
2025/10/28 17:31:02 "kernel BUG in ocfs2_set_new_buffer_uptodate": saved repro log into 1761672662.repro.log
2025/10/28 17:31:21 runner 0 connected
2025/10/28 17:31:21 base crash: lost connection to test machine
2025/10/28 17:31:29 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:31:37 runner 1 connected
2025/10/28 17:32:10 runner 1 connected
2025/10/28 17:32:20 attempt #0 to run "kernel BUG in ocfs2_set_new_buffer_uptodate" on base: crashed with kernel BUG in ocfs2_set_new_buffer_uptodate
2025/10/28 17:32:20 crashes both: kernel BUG in ocfs2_set_new_buffer_uptodate / kernel BUG in ocfs2_set_new_buffer_uptodate
2025/10/28 17:32:31 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:33:11 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:33:16 runner 0 connected
2025/10/28 17:33:22 crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known
2025/10/28 17:33:22 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is to be ignored
2025/10/28 17:33:22 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false]
2025/10/28 17:34:00 runner 8 connected
2025/10/28 17:34:01 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:34:02 crash "INFO: task hung in bdev_open" is already known
2025/10/28 17:34:02 base crash "INFO: task hung in bdev_open" is to be ignored
2025/10/28 17:34:02 patched crashed: INFO: task hung in bdev_open [need repro = false]
2025/10/28 17:34:12 runner 1 connected
2025/10/28 17:34:24 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:34:41 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false]
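The 1761672662.crash.log / 1761672662.repro.log names above are Unix timestamps of the moment the artifacts were saved. Assuming the log clock is UTC, the file name decodes back to the time on the very same log line, as this small Go check shows:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// 1761672662 is the numeric prefix of the saved crash/repro logs above.
	t := time.Unix(1761672662, 0).UTC()
	fmt.Println(t.Format("2006/01/02 15:04:05")) // prints 2025/10/28 17:31:02
}
```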
2025/10/28 17:34:51 runner 6 connected
2025/10/28 17:34:53 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:34:58 base crash: possible deadlock in ocfs2_xattr_set
2025/10/28 17:35:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 334, "corpus": 46512, "corpus [files]": 18892, "corpus [symbols]": 19553, "cover overflows": 69637, "coverage": 306259, "distributor delayed": 57216, "distributor undelayed": 57214, "distributor violated": 725, "exec candidate": 81237, "exec collide": 12393, "exec fuzz": 23414, "exec gen": 1184, "exec hints": 13464, "exec inject": 0, "exec minimize": 9508, "exec retries": 35, "exec seeds": 1060, "exec smash": 8762, "exec total [base]": 204325, "exec total [new]": 409794, "exec triage": 147426, "executor restarts [base]": 675, "executor restarts [new]": 1902, "fault jobs": 0, "fuzzer jobs": 12, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 4, "hints jobs": 3, "max signal": 310797, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5509, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47804, "no exec duration": 213638000000, "no exec requests": 904, "pending": 1, "prog exec time": 477, "reproducing": 3, "rpc recv": 21105620900, "rpc sent": 4738516968, "signal": 301651, "smash jobs": 3, "triage jobs": 6, "vm output": 88055994, "vm restarts [base]": 65, "vm restarts [new]": 264 }
2025/10/28 17:35:21 base crash: lost connection to test machine
2025/10/28 17:35:29 runner 8 connected
2025/10/28 17:35:42 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:35:42 repro finished 'WARNING in folio_memcg (full)', repro=true crepro=true desc='WARNING in folio_memcg' hub=false from_dashboard=false
2025/10/28 17:35:42 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 14.37 minutes
2025/10/28 17:35:42 start reproducing 'WARNING in folio_memcg (full)'
2025/10/28 17:35:42 "WARNING in folio_memcg": saved crash log into 1761672942.crash.log
2025/10/28 17:35:42 "WARNING in folio_memcg": saved repro log into 1761672942.repro.log
2025/10/28 17:35:48 runner 1 connected
2025/10/28 17:36:11 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:36:11 runner 2 connected
2025/10/28 17:36:50 base crash: possible deadlock in ocfs2_init_acl
2025/10/28 17:37:05 patched crashed: kernel BUG in txUnlock [need repro = false]
2025/10/28 17:37:30 base crash: kernel BUG in txAbort
2025/10/28 17:37:35 attempt #0 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:37:46 runner 2 connected
2025/10/28 17:37:54 runner 0 connected
2025/10/28 17:38:14 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/10/28 17:38:20 runner 1 connected
2025/10/28 17:38:44 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:39:03 runner 7 connected
2025/10/28 17:39:13 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:39:26 attempt #1 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:39:29 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 17:39:40 runner 1 connected
2025/10/28 17:40:03 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:40:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 345, "corpus": 46533, "corpus [files]": 18900, "corpus [symbols]": 19558, "cover overflows": 71363, "coverage": 306291, "distributor delayed": 57362, "distributor undelayed": 57361, "distributor violated": 728, "exec candidate": 81237, "exec collide": 14063, "exec fuzz": 26580, "exec gen": 1353, "exec hints": 14370, "exec inject": 0, "exec minimize": 10211, "exec retries": 37, "exec seeds": 1121, "exec smash": 9221, "exec total [base]": 205778, "exec total [new]": 417168, "exec triage": 147664, "executor restarts [base]": 704, "executor restarts [new]": 1966, "fault jobs": 0, "fuzzer jobs": 17, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 6, "max signal": 310963, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5958, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 1, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47884, "no exec duration": 225215000000, "no exec requests": 925, "pending": 0, "prog exec time": 392, "reproducing": 3, "rpc recv": 21459664196, "rpc sent": 4890695048, "signal": 301683, "smash jobs": 4, "triage jobs": 7, "vm output": 91750793, "vm restarts [base]": 70, "vm restarts [new]": 267 }
2025/10/28 17:40:17 runner 1 connected
2025/10/28 17:40:38 base crash: BUG: sleeping function called from invalid context in hook_sb_delete
2025/10/28 17:40:43 base crash: kernel BUG in txUnlock
2025/10/28 17:41:06 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:41:19 attempt #2 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:41:19 patched-only: WARNING in folio_memcg
2025/10/28 17:41:26 runner 2 connected
2025/10/28 17:41:32 runner 1 connected
2025/10/28 17:42:08 runner 0 connected
2025/10/28 17:42:10 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:42:14 base crash: WARNING in xfrm_state_fini
2025/10/28 17:42:23 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:42:27 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
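The "attempt #N ... did not crash" / "patched-only:" sequence above, together with the earlier "crashes both:" outcome for the ocfs2 bug at 17:32:20, implies the diff-fuzzing triage rule: a repro that crashed the patched kernel is replayed on the base kernel a fixed number of times, and only if no attempt crashes is the finding declared patched-only. The sketch below is an illustrative reconstruction of that decision logic under the assumption of three attempts, as seen in this log; classify and runOnBase are invented names, not syzkaller's API.

```go
package main

import "fmt"

// classify replays a patched-kernel repro on the base kernel up to
// `attempts` times. Any base crash means the bug predates the patch
// ("crashes both"); none means it is likely introduced by the patch.
func classify(title string, runOnBase func() bool, attempts int) string {
	for i := 0; i < attempts; i++ {
		if runOnBase() {
			return fmt.Sprintf("crashes both: %s", title)
		}
		fmt.Printf("attempt #%d to run %q on base: did not crash\n", i, title)
	}
	return fmt.Sprintf("patched-only: %s", title)
}

func main() {
	baseNeverCrashes := func() bool { return false }
	// Mirrors the folio_memcg outcome above: three clean base runs,
	// then the patched-only verdict.
	fmt.Println(classify("WARNING in folio_memcg", baseNeverCrashes, 3))
}
```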
2025/10/28 17:42:47 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:43:03 runner 2 connected
2025/10/28 17:43:18 failed repro for "INFO: task hung in addrconf_verify_work", err=%!s()
2025/10/28 17:43:18 repro finished 'INFO: task hung in addrconf_verify_work', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/10/28 17:43:18 "INFO: task hung in addrconf_verify_work": saved crash log into 1761673398.crash.log
2025/10/28 17:43:18 "INFO: task hung in addrconf_verify_work": saved repro log into 1761673398.repro.log
2025/10/28 17:43:20 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:43:30 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:43:37 runner 1 connected
2025/10/28 17:43:46 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:44:09 runner 0 connected
2025/10/28 17:44:20 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:44:23 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:44:31 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:44:39 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:44:54 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:45:03 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:45:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 374, "corpus": 46553, "corpus [files]": 18909, "corpus [symbols]": 19568, "cover overflows": 73484, "coverage": 306330, "distributor delayed": 57472, "distributor undelayed": 57467, "distributor violated": 731, "exec candidate": 81237, "exec collide": 15729, "exec fuzz": 29698, "exec gen": 1512, "exec hints": 15042, "exec inject": 0, "exec minimize": 10827, "exec retries": 37, "exec seeds": 1171, "exec smash": 9718, "exec total [base]": 207917, "exec total [new]": 424129, "exec triage": 147847, "executor restarts [base]": 740, "executor restarts [new]": 2002, "fault jobs": 0, "fuzzer jobs": 12, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 2, "hints jobs": 4, "max signal": 311106, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 6245, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 1, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47943, "no exec duration": 234196000000, "no exec requests": 940, "pending": 0, "prog exec time": 499, "reproducing": 2, "rpc recv": 21820934944, "rpc sent": 5062143896, "signal": 301725, "smash jobs": 1, "triage jobs": 7, "vm output": 94365559, "vm restarts [base]": 75, "vm restarts [new]": 269 }
2025/10/28 17:45:10 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:45:10 runner 8 connected
2025/10/28 17:45:20 runner 1 connected
2025/10/28 17:45:29 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:45:52 runner 2 connected
2025/10/28 17:45:58 runner 7 connected
2025/10/28 17:45:58 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:46:03 base crash: possible deadlock in hfsplus_get_block
2025/10/28 17:46:27 patched crashed: possible deadlock in ocfs2_setattr [need repro = true]
2025/10/28 17:46:27 scheduled a reproduction of 'possible deadlock in ocfs2_setattr'
2025/10/28 17:46:27 start reproducing 'possible deadlock in ocfs2_setattr'
2025/10/28 17:46:34 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:46:50 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/10/28 17:46:55 runner 0 connected
2025/10/28 17:46:58 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/10/28 17:47:16 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:47:17 runner 7 connected
2025/10/28 17:47:32 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:47:39 runner 1 connected
2025/10/28 17:47:47 runner 6 connected
2025/10/28 17:47:52 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:48:52 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:49:09 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:49:23 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:49:40 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:49:50 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:50:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 399, "corpus": 46568, "corpus [files]": 18918, "corpus [symbols]": 19575, "cover overflows": 74914, "coverage": 306356, "distributor delayed": 57588, "distributor undelayed": 57583, "distributor violated": 731, "exec candidate": 81237, "exec collide": 17049, "exec fuzz": 32041, "exec gen": 1665, "exec hints": 15579, "exec inject": 0, "exec minimize": 11477, "exec retries": 37, "exec seeds": 1212, "exec smash": 10042, "exec total [base]": 211241, "exec total [new]": 429673, "exec triage": 148020, "executor restarts [base]": 773, "executor restarts [new]": 2083, "fault jobs": 0, "fuzzer jobs": 18, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 3, "max signal": 311226, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 6586, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 1, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47999, "no exec duration": 246703000000, "no exec requests": 957, "pending": 0, "prog exec time": 455, "reproducing": 3, "rpc recv": 22279637028, "rpc sent": 5297688544, "signal": 301741, "smash jobs": 2, "triage jobs": 13, "vm output": 97122707, "vm restarts [base]": 77, "vm restarts [new]": 275 }
2025/10/28 17:50:17 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:50:20 runner 8 connected
2025/10/28 17:50:33 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:50:37 runner 1 connected
2025/10/28 17:51:09 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:51:18 reproducing crash 'no output/lost connection': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:51:18 repro finished 'WARNING in folio_memcg (full)', repro=true crepro=true desc='WARNING in folio_memcg' hub=false from_dashboard=false
2025/10/28 17:51:18 found repro for "WARNING in folio_memcg" (orig title: "-SAME-", reliability: 1), took 15.61 minutes
2025/10/28 17:51:18 "WARNING in folio_memcg": saved crash log into 1761673878.crash.log
2025/10/28 17:51:18 "WARNING in folio_memcg": saved repro log into 1761673878.repro.log
2025/10/28 17:51:42 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:51:57 crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is already known
2025/10/28 17:51:57 base crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is to be ignored
2025/10/28 17:51:57 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = false]
2025/10/28 17:51:58 runner 1 connected
2025/10/28 17:52:16 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:52:28 patched crashed: WARNING in folio_memcg [need repro = true]
2025/10/28 17:52:28 scheduled a reproduction of 'WARNING in folio_memcg'
2025/10/28 17:52:28 start reproducing 'WARNING in folio_memcg'
2025/10/28 17:52:46 runner 7 connected
2025/10/28 17:52:47 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:52:50 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:53:11 attempt #0 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:53:17 runner 1 connected
2025/10/28 17:53:33 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:53:53 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:53:57 patched crashed: lost connection to test machine [need repro = false]
2025/10/28 17:54:05 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:54:14 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:54:27 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:54:37 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:54:45 runner 7 connected
2025/10/28 17:54:51 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:55:04 attempt #1 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:55:04 runner 2 connected
2025/10/28 17:55:09 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 424, "corpus": 46587, "corpus [files]": 18926, "corpus [symbols]": 19581, "cover overflows": 76325, "coverage": 306403, "distributor delayed": 57679, "distributor undelayed": 57674, "distributor violated": 735, "exec candidate": 81237, "exec collide": 18479, "exec fuzz": 34576, "exec gen": 1816, "exec hints": 16149, "exec inject": 0, "exec minimize": 11919, "exec retries": 37, "exec seeds": 1264, "exec smash": 10443, "exec total [base]": 213599, "exec total [new]": 435388, "exec triage": 148151, "executor restarts [base]": 809, "executor restarts [new]": 2139, "fault jobs": 0, "fuzzer jobs": 11, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 3, "hints jobs": 3, "max signal": 311306, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 6833, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 1, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 48043, "no exec duration": 253301000000, "no exec requests": 969, "pending": 0, "prog exec time": 430, "reproducing": 3, "rpc recv": 22619474656, "rpc sent": 5463136144, "signal": 301790, "smash jobs": 1, "triage jobs": 7, "vm output": 98976083, "vm restarts [base]": 77, "vm restarts [new]": 282 }
2025/10/28 17:55:17 runner 8 connected
2025/10/28 17:55:53 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:55:57 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:56:09 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:56:25 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:56:31 crash "WARNING in udf_truncate_extents" is already known
2025/10/28 17:56:31 base crash "WARNING in udf_truncate_extents" is to be ignored
2025/10/28 17:56:31 patched crashed: WARNING in udf_truncate_extents [need repro = false]
2025/10/28 17:56:36 patched crashed: WARNING in folio_memcg [need repro = false]
2025/10/28 17:56:54 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/10/28 17:56:58 reproducing crash 'WARNING in driver_unregister': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f net/sched/sch_generic.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:57:02 attempt #2 to run "WARNING in folio_memcg" on base: did not crash
2025/10/28 17:57:02 patched-only: WARNING in folio_memcg
2025/10/28 17:57:08 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:57:13 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:57:15 runner 6 connected
2025/10/28 17:57:20 runner 8 connected
2025/10/28 17:57:25 runner 1 connected
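The periodic STAT records are plain JSON objects with integer counters, so any two snapshots can be diffed to estimate throughput; for example, "exec total [new]" grows from 429673 (17:50:09) to 435388 (17:55:09), roughly 19 execs/sec on the patched side over that 300-second interval. A small sketch of the calculation (counter names and values are verbatim from the log; the helper is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// counter extracts one integer field from a STAT JSON payload.
func counter(statJSON, key string) int64 {
	var m map[string]int64
	if err := json.Unmarshal([]byte(statJSON), &m); err != nil {
		panic(err)
	}
	return m[key]
}

func main() {
	prev := `{"exec total [new]": 429673}` // from the 17:50:09 STAT above
	cur := `{"exec total [new]": 435388}`  // from the 17:55:09 STAT above
	key := "exec total [new]"
	// STAT entries are emitted every 5 minutes in this log.
	rate := float64(counter(cur, key)-counter(prev, key)) / 300.0
	fmt.Printf("%s: %.1f execs/sec over the last STAT interval\n", key, rate)
}
```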
2025/10/28 17:57:37 patched crashed: kernel BUG in jfs_evict_inode [need repro = false]
2025/10/28 17:57:43 runner 7 connected
2025/10/28 17:57:51 runner 0 connected
2025/10/28 17:58:18 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:58:25 runner 6 connected
2025/10/28 17:58:28 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:58:46 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:59:32 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 17:59:54 reproducing crash 'possible deadlock in ocfs2_setattr': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/ocfs2/file.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 18:00:02 bug reporting terminated
2025/10/28 18:00:02 status reporting terminated
2025/10/28 18:00:02 new: rpc server terminated
2025/10/28 18:00:02 base: rpc server terminated
2025/10/28 18:00:02 repro finished 'possible deadlock in ocfs2_setattr', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/10/28 18:00:03 reproducing crash 'WARNING in folio_memcg': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f mm/zswap.c]: fork/exec scripts/get_maintainer.pl: no such file or directory
2025/10/28 18:00:03 repro finished 'WARNING in folio_memcg', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/10/28 18:00:12 base: pool terminated
2025/10/28 18:00:12 base: kernel context loop terminated
2025/10/28 18:02:04 repro finished 'WARNING in driver_unregister', repro=false crepro=false desc='' hub=false from_dashboard=false
2025/10/28 18:02:04 repro loop terminated
2025/10/28 18:02:04 new: pool terminated
2025/10/28 18:02:04 new: kernel context loop terminated
2025/10/28 18:02:04 diff fuzzing terminated
2025/10/28 18:02:04 fuzzing is finished
2025/10/28 18:02:04 status at the end:
Title On-Base On-Patched
WARNING in folio_memcg 134 crashes[reproduced]
BUG: sleeping function called from invalid context in hook_sb_delete 1 crashes
INFO: task hung in __iterate_supers 3 crashes 3 crashes
INFO: task hung in addrconf_verify_work 1 crashes
INFO: task hung in bdev_open 1 crashes
INFO: task hung in corrupted 1 crashes 1 crashes
INFO: task hung in read_part_sector 1 crashes
INFO: task hung in reg_check_chans_work 2 crashes
INFO: task hung in sync_bdevs 1 crashes
INFO: trying to register non-static key in ocfs2_dlm_shutdown 2 crashes
KASAN: slab-out-of-bounds Read in change_page_attr_set_clr 10 crashes 37 crashes
KASAN: slab-use-after-free Read in __ethtool_get_link_ksettings 1 crashes
KASAN: slab-use-after-free Read in hdm_disconnect 1 crashes 5 crashes
KASAN: slab-use-after-free Read in l2cap_unregister_user 1 crashes
WARNING in dax_iomap_rw 2 crashes
WARNING in driver_unregister 1 crashes
WARNING in udf_truncate_extents 1 crashes
WARNING in xfrm6_tunnel_net_exit 2 crashes 5 crashes
WARNING in xfrm_state_fini 6 crashes 10 crashes
general protection fault in pcl818_ai_cancel 2 crashes 5 crashes
kernel BUG in jfs_evict_inode 1 crashes 3 crashes
kernel BUG in ocfs2_set_new_buffer_uptodate 1 crashes 1 crashes[reproduced]
kernel BUG in ocfs2_write_cluster_by_desc 1 crashes
kernel BUG in txAbort 1 crashes
kernel BUG in txEnd 1 crashes
kernel BUG in txUnlock 4 crashes 5 crashes
lost connection to test machine 15 crashes 25 crashes
possible deadlock in hfsplus_get_block 1 crashes
possible deadlock in ntfs_fiemap 2 crashes
possible deadlock in ocfs2_init_acl 2 crashes 7 crashes
possible deadlock in ocfs2_reserve_suballoc_bits 2 crashes
possible deadlock in ocfs2_setattr 1 crashes
possible deadlock in ocfs2_try_remove_refcount_tree 8 crashes 16 crashes
possible deadlock in ocfs2_xattr_set 1 crashes
unregister_netdevice: waiting for DEV to become free 2 crashes 1 crashes
2025/10/28 18:02:04 possibly patched-only: WARNING in folio_memcg