2025/08/27 17:19:34 extracted 303777 symbol hashes for base and 303777 for patched
2025/08/27 17:19:34 binaries are different, continuing fuzzing
2025/08/27 17:19:35 adding modified_functions to focus areas: ["__UNIQUE_ID_nop1465" "__UNIQUE_ID_nop1466" "__folio_put" "__folio_split" "__ia32_sys_set_mempolicy" "__migrate_device_pages" "__se_sys_mbind" "__se_sys_migrate_pages" "__se_sys_set_mempolicy_home_node" "__x64_sys_set_mempolicy" "alloc_migration_target_by_mpol" "alloc_pages_bulk_mempolicy_noprof" "can_change_pmd_writable" "can_split_folio" "copy_huge_pmd" "do_huge_pmd_anonymous_page" "do_huge_pmd_wp_page" "folio_batch_move_lru" "folio_mark_accessed" "folio_split" "folios_put_refs" "free_folio_and_swap_cache" "free_swap_cache" "lru_activate" "lru_add" "lru_deactivate" "lru_deactivate_file" "lru_gen_clear_refs" "lru_lazyfree" "lru_move_tail" "madvise_free_huge_pmd" "mempolicy_slab_node" "mfill_atomic_continue" "mfill_atomic_copy" "mfill_atomic_poison" "mfill_atomic_zeropage" "migrate_device_unmap" "migrate_vma_collect_pmd" "mm_get_huge_zero_folio" "move_pages" "perf_trace_mm_lru_insertion" "policy_nodemask" "queue_folios_pte_range" "split_huge_pages_in_file" "split_huge_pages_write" "split_huge_pmd_locked" "trace_event_raw_event_mm_lru_insertion" "unmap_huge_pmd_locked" "weighted_interleave_auto_store" "weighted_interleave_nodes" "zap_huge_pmd"]
2025/08/27 17:19:35 adding directly modified files to focus areas: ["include/linux/huge_mm.h"]
2025/08/27 17:19:37 downloaded the corpus from https://storage.googleapis.com/syzkaller/corpus/ci-upstream-kasan-gce-root-corpus.db
2025/08/27 17:20:27 runner 3 connected
2025/08/27 17:20:32 initializing coverage information...
2025/08/27 17:20:34 runner 8 connected
2025/08/27 17:20:34 runner 2 connected
2025/08/27 17:20:34 runner 6 connected
2025/08/27 17:20:34 runner 3 connected
2025/08/27 17:20:34 runner 1 connected
2025/08/27 17:20:34 runner 0 connected
2025/08/27 17:20:34 runner 5 connected
2025/08/27 17:20:34 runner 2 connected
2025/08/27 17:20:34 runner 0 connected
2025/08/27 17:20:34 runner 7 connected
2025/08/27 17:20:34 runner 9 connected
2025/08/27 17:20:34 runner 4 connected
2025/08/27 17:20:35 runner 1 connected
2025/08/27 17:20:36 discovered 7699 source files, 338649 symbols
2025/08/27 17:20:37 coverage filter: __UNIQUE_ID_nop1465: []
2025/08/27 17:20:37 coverage filter: __UNIQUE_ID_nop1466: []
2025/08/27 17:20:37 coverage filter: __folio_put: [__folio_put]
2025/08/27 17:20:37 coverage filter: __folio_split: [__folio_split]
2025/08/27 17:20:37 coverage filter: __ia32_sys_set_mempolicy: [__ia32_sys_set_mempolicy __ia32_sys_set_mempolicy_home_node]
2025/08/27 17:20:37 coverage filter: __migrate_device_pages: [__migrate_device_pages]
2025/08/27 17:20:37 coverage filter: __se_sys_mbind: [__se_sys_mbind]
2025/08/27 17:20:37 coverage filter: __se_sys_migrate_pages: [__se_sys_migrate_pages]
2025/08/27 17:20:37 coverage filter: __se_sys_set_mempolicy_home_node: [__se_sys_set_mempolicy_home_node]
2025/08/27 17:20:37 coverage filter: __x64_sys_set_mempolicy: [__x64_sys_set_mempolicy __x64_sys_set_mempolicy_home_node]
2025/08/27 17:20:37 coverage filter: alloc_migration_target_by_mpol: [alloc_migration_target_by_mpol]
2025/08/27 17:20:37 coverage filter: alloc_pages_bulk_mempolicy_noprof: [alloc_pages_bulk_mempolicy_noprof]
2025/08/27 17:20:37 coverage filter: can_change_pmd_writable: [can_change_pmd_writable]
2025/08/27 17:20:37 coverage filter: can_split_folio: [can_split_folio]
2025/08/27 17:20:37 coverage filter: copy_huge_pmd: [copy_huge_pmd]
2025/08/27 17:20:37 coverage filter: do_huge_pmd_anonymous_page: [do_huge_pmd_anonymous_page]
2025/08/27 17:20:37 coverage filter: do_huge_pmd_wp_page: [do_huge_pmd_wp_page]
2025/08/27 17:20:37 coverage filter: folio_batch_move_lru: [folio_batch_move_lru]
2025/08/27 17:20:37 coverage filter: folio_mark_accessed: [folio_mark_accessed]
2025/08/27 17:20:37 coverage filter: folio_split: [erofs_onlinefolio_split folio_split try_folio_split]
2025/08/27 17:20:37 coverage filter: folios_put_refs: [folios_put_refs]
2025/08/27 17:20:37 coverage filter: free_folio_and_swap_cache: [free_folio_and_swap_cache]
2025/08/27 17:20:37 coverage filter: free_swap_cache: [free_swap_cache]
2025/08/27 17:20:37 coverage filter: lru_activate: [__bpf_trace_mm_lru_activate __probestub_mm_lru_activate __traceiter_mm_lru_activate lru_activate perf_trace_mm_lru_activate trace_event_raw_event_mm_lru_activate trace_raw_output_mm_lru_activate]
2025/08/27 17:20:37 coverage filter: lru_add: [__lru_add_drain_all __probestub_nfsd_file_lru_add __traceiter_nfsd_file_lru_add d_lru_add list_lru_add list_lru_add_obj lru_add lru_add_drain lru_add_drain_all lru_add_drain_cpu lru_add_drain_cpu_zone lru_add_drain_per_cpu lru_add_split_folio]
2025/08/27 17:20:37 coverage filter: lru_deactivate: [lru_deactivate lru_deactivate_file]
2025/08/27 17:20:37 coverage filter: lru_deactivate_file: []
2025/08/27 17:20:37 coverage filter: lru_gen_clear_refs: [lru_gen_clear_refs]
2025/08/27 17:20:37 coverage filter: lru_lazyfree: [lru_lazyfree]
2025/08/27 17:20:37 coverage filter: lru_move_tail: [drm_gem_lru_move_tail drm_gem_lru_move_tail_locked lru_move_tail]
2025/08/27 17:20:37 coverage filter: madvise_free_huge_pmd: [madvise_free_huge_pmd]
2025/08/27 17:20:37 coverage filter: mempolicy_slab_node: [mempolicy_slab_node]
2025/08/27 17:20:37 coverage filter: mfill_atomic_continue: [mfill_atomic_continue]
2025/08/27 17:20:37 coverage filter: mfill_atomic_copy: [mfill_atomic_copy]
2025/08/27 17:20:37 coverage filter: mfill_atomic_poison: [mfill_atomic_poison]
2025/08/27 17:20:37 coverage filter: mfill_atomic_zeropage: [mfill_atomic_zeropage]
2025/08/27 17:20:37 coverage filter: migrate_device_unmap: [migrate_device_unmap]
2025/08/27 17:20:37 coverage filter: migrate_vma_collect_pmd: [migrate_vma_collect_pmd]
2025/08/27 17:20:37 coverage filter: mm_get_huge_zero_folio: [mm_get_huge_zero_folio]
2025/08/27 17:20:37 coverage filter: move_pages: [__ia32_sys_move_pages __remove_pages __se_sys_move_pages __x64_sys_move_pages move_pages move_pages_and_store_status move_pages_huge_pmd move_pages_pte sgx_encl_remove_pages]
2025/08/27 17:20:37 coverage filter: perf_trace_mm_lru_insertion: [perf_trace_mm_lru_insertion]
2025/08/27 17:20:37 coverage filter: policy_nodemask: [policy_nodemask]
2025/08/27 17:20:37 coverage filter: queue_folios_pte_range: [queue_folios_pte_range]
2025/08/27 17:20:37 coverage filter: split_huge_pages_in_file: [split_huge_pages_in_file]
2025/08/27 17:20:37 coverage filter: split_huge_pages_write: [split_huge_pages_write]
2025/08/27 17:20:37 coverage filter: split_huge_pmd_locked: [split_huge_pmd_locked]
2025/08/27 17:20:37 coverage filter: trace_event_raw_event_mm_lru_insertion: [trace_event_raw_event_mm_lru_insertion]
2025/08/27 17:20:37 coverage filter: unmap_huge_pmd_locked: [unmap_huge_pmd_locked]
2025/08/27 17:20:37 coverage filter: weighted_interleave_auto_store: [weighted_interleave_auto_store]
2025/08/27 17:20:37 coverage filter: weighted_interleave_nodes: [weighted_interleave_nodes]
2025/08/27 17:20:37 coverage filter: zap_huge_pmd: [zap_huge_pmd]
2025/08/27 17:20:37 coverage filter: include/linux/huge_mm.h: []
2025/08/27 17:20:37 area "symbols": 5567 PCs in the cover filter
2025/08/27 17:20:37 area "files": 0 PCs in the cover filter
2025/08/27 17:20:37 area "": 0 PCs in the cover filter
2025/08/27 17:20:37 executor cover filter: 0 PCs
2025/08/27 17:20:41 executor cover filter: 0 PCs
2025/08/27 17:20:43 machine check: disabled the following syscalls:
fsetxattr$security_selinux : selinux is not enabled
fsetxattr$security_smack_transmute : smack is not enabled
fsetxattr$smack_xattr_label : smack is not enabled
get_thread_area : syscall get_thread_area is not present
lookup_dcookie : syscall lookup_dcookie is not present
lsetxattr$security_selinux : selinux is not enabled
lsetxattr$security_smack_transmute : smack is not enabled
lsetxattr$smack_xattr_label : smack is not enabled
mount$esdfs : /proc/filesystems does not contain esdfs
mount$incfs : /proc/filesystems does not contain incremental-fs
openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory
openat$ashmem : failed to open /dev/ashmem: no such file or directory
openat$bifrost : failed to open /dev/bifrost: no such file or directory
openat$binder : failed to open /dev/binder: no such file or directory
openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory
openat$capi20 : failed to open /dev/capi20: no such file or directory
openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory
openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory
openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory
openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory
openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory
openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory
openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory
openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory
openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory
openat$hwbinder : failed to open /dev/hwbinder: no such file or directory
openat$i915 : failed to open /dev/i915: no such file or directory
openat$img_rogue : failed to open /dev/img-rogue: no such file or directory
openat$irnet : failed to open /dev/irnet: no such file or directory
openat$keychord : failed to open /dev/keychord: no such file or directory
openat$kvm : failed to open /dev/kvm: no such file or directory
openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory
openat$mali : failed to open /dev/mali0: no such file or directory
openat$md : failed to open /dev/md0: no such file or directory
openat$msm : failed to open /dev/msm: no such file or directory
openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory
openat$nmem0 : failed to open /dev/nmem0: no such file or directory
openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory
openat$pmem0 : failed to open /dev/pmem0: no such file or directory
openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory
openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory
openat$proc_reclaim : failed
to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : 
failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled set_thread_area : syscall set_thread_area is not present setxattr$security_selinux : selinux is not enabled setxattr$security_smack_transmute : smack is not enabled setxattr$smack_xattr_label : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported syz_kvm_setup_cpu$ppc64 : unsupported arch syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device uselib : syscall uselib is not present write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled 
write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled transitively disabled the following syscalls (missing resource [creating syscalls]): bind$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] close$ibv_device : fd_rdma [openat$uverbs0] connect$hf : sock_hf [socket$hf] connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem] ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem] ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20] ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20] ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20] ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20] ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20] ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20] ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915] 
ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue 
[openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue] ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy 
[syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy] ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali] 
ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali] 
ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_ASSIGN_SET_MSIX_NR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm] ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm] ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm] ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] 
ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm] ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM] 
ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RUN : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_GUEST_DEBUG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] 
ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_CANCEL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm] ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_XEN_HVM_CONFIG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0] ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw] ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device] ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb] ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure] ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km] ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage] ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio] ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio] ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm] ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] pkey_free : pkey [pkey_alloc] pkey_mprotect : pkey [pkey_alloc] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] recvmsg$hf : sock_hf [socket$hf] sendmsg$hf : sock_hf [socket$hf] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86] syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU] syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] 
write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. 
SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 3838/8054
2025/08/27 17:20:43 new: machine check complete
2025/08/27 17:20:43 new: adding 80039 seeds
2025/08/27 17:20:45 machine check: disabled the following syscalls:
fsetxattr$security_selinux : selinux is not enabled fsetxattr$security_smack_transmute : smack is not enabled fsetxattr$smack_xattr_label : smack is not enabled get_thread_area : syscall get_thread_area is not present lookup_dcookie : syscall lookup_dcookie is not present lsetxattr$security_selinux : selinux is not enabled lsetxattr$security_smack_transmute : smack is not enabled lsetxattr$smack_xattr_label : smack is not enabled mount$esdfs : /proc/filesystems does not contain esdfs mount$incfs : /proc/filesystems does not contain incremental-fs openat$acpi_thermal_rel : failed to open /dev/acpi_thermal_rel: no such file or directory openat$ashmem : failed to open /dev/ashmem: no such file or directory openat$bifrost : failed to open /dev/bifrost: no such file or directory openat$binder : failed to open /dev/binder: no such file or directory openat$camx : failed to open /dev/v4l/by-path/platform-soc@0:qcom_cam-req-mgr-video-index0: no such file or directory openat$capi20 : failed to open /dev/capi20: no such file or directory openat$cdrom1 : failed to open /dev/cdrom1: no such file or directory openat$damon_attrs : failed to open /sys/kernel/debug/damon/attrs: no such file or directory openat$damon_init_regions : failed to open /sys/kernel/debug/damon/init_regions: no such file or directory openat$damon_kdamond_pid : failed to open /sys/kernel/debug/damon/kdamond_pid: no such file or directory openat$damon_mk_contexts : failed to open /sys/kernel/debug/damon/mk_contexts: no such file or directory openat$damon_monitor_on : failed to open /sys/kernel/debug/damon/monitor_on: no such file or directory openat$damon_rm_contexts : failed to open /sys/kernel/debug/damon/rm_contexts: no such file or directory openat$damon_schemes : failed to open /sys/kernel/debug/damon/schemes: no such file or directory openat$damon_target_ids : failed to open /sys/kernel/debug/damon/target_ids: no such file or directory openat$hwbinder : failed to open /dev/hwbinder: no such file or directory openat$i915 : failed to open /dev/i915: no such file or directory openat$img_rogue : failed to open /dev/img-rogue: no such file or directory openat$irnet : failed to open /dev/irnet: no such file or directory openat$keychord : failed to open /dev/keychord: no such file or directory openat$kvm : failed to open /dev/kvm: no such file or directory openat$lightnvm : failed to open /dev/lightnvm/control: no such file or directory openat$mali : failed to open /dev/mali0: no such file or directory openat$md : failed to open /dev/md0: no such file or directory openat$msm : failed to open /dev/msm: no such file or directory openat$ndctl0 : failed to open /dev/ndctl0: no such file or directory openat$nmem0 : failed to open /dev/nmem0: no such file or directory openat$pktcdvd : failed to open /dev/pktcdvd/control: no such file or directory openat$pmem0 : failed to open /dev/pmem0: no such file or directory openat$proc_capi20 : failed to open /proc/capi/capi20: no such file or directory openat$proc_capi20ncci : failed to open /proc/capi/capi20ncci: no such file or directory openat$proc_reclaim : failed to open /proc/self/reclaim: no such file or directory openat$ptp1 : failed to open /dev/ptp1: no
such file or directory openat$rnullb : failed to open /dev/rnullb0: no such file or directory openat$selinux_access : failed to open /selinux/access: no such file or directory openat$selinux_attr : selinux is not enabled openat$selinux_avc_cache_stats : failed to open /selinux/avc/cache_stats: no such file or directory openat$selinux_avc_cache_threshold : failed to open /selinux/avc/cache_threshold: no such file or directory openat$selinux_avc_hash_stats : failed to open /selinux/avc/hash_stats: no such file or directory openat$selinux_checkreqprot : failed to open /selinux/checkreqprot: no such file or directory openat$selinux_commit_pending_bools : failed to open /selinux/commit_pending_bools: no such file or directory openat$selinux_context : failed to open /selinux/context: no such file or directory openat$selinux_create : failed to open /selinux/create: no such file or directory openat$selinux_enforce : failed to open /selinux/enforce: no such file or directory openat$selinux_load : failed to open /selinux/load: no such file or directory openat$selinux_member : failed to open /selinux/member: no such file or directory openat$selinux_mls : failed to open /selinux/mls: no such file or directory openat$selinux_policy : failed to open /selinux/policy: no such file or directory openat$selinux_relabel : failed to open /selinux/relabel: no such file or directory openat$selinux_status : failed to open /selinux/status: no such file or directory openat$selinux_user : failed to open /selinux/user: no such file or directory openat$selinux_validatetrans : failed to open /selinux/validatetrans: no such file or directory openat$sev : failed to open /dev/sev: no such file or directory openat$sgx_provision : failed to open /dev/sgx_provision: no such file or directory openat$smack_task_current : smack is not enabled openat$smack_thread_current : smack is not enabled openat$smackfs_access : failed to open /sys/fs/smackfs/access: no such file or directory openat$smackfs_ambient : failed to open /sys/fs/smackfs/ambient: no such file or directory openat$smackfs_change_rule : failed to open /sys/fs/smackfs/change-rule: no such file or directory openat$smackfs_cipso : failed to open /sys/fs/smackfs/cipso: no such file or directory openat$smackfs_cipsonum : failed to open /sys/fs/smackfs/direct: no such file or directory openat$smackfs_ipv6host : failed to open /sys/fs/smackfs/ipv6host: no such file or directory openat$smackfs_load : failed to open /sys/fs/smackfs/load: no such file or directory openat$smackfs_logging : failed to open /sys/fs/smackfs/logging: no such file or directory openat$smackfs_netlabel : failed to open /sys/fs/smackfs/netlabel: no such file or directory openat$smackfs_onlycap : failed to open /sys/fs/smackfs/onlycap: no such file or directory openat$smackfs_ptrace : failed to open /sys/fs/smackfs/ptrace: no such file or directory openat$smackfs_relabel_self : failed to open /sys/fs/smackfs/relabel-self: no such file or directory openat$smackfs_revoke_subject : failed to open /sys/fs/smackfs/revoke-subject: no such file or directory openat$smackfs_syslog : failed to open /sys/fs/smackfs/syslog: no such file or directory openat$smackfs_unconfined : failed to open /sys/fs/smackfs/unconfined: no such file or directory openat$tlk_device : failed to open /dev/tlk_device: no such file or directory openat$trusty : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_avb : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_gatekeeper : failed to 
open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwkey : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_hwrng : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_km_secure : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$trusty_storage : failed to open /dev/trusty-ipc-dev0: no such file or directory openat$tty : failed to open /dev/tty: no such device or address openat$uverbs0 : failed to open /dev/infiniband/uverbs0: no such file or directory openat$vfio : failed to open /dev/vfio/vfio: no such file or directory openat$vndbinder : failed to open /dev/vndbinder: no such file or directory openat$vtpm : failed to open /dev/vtpmx: no such file or directory openat$xenevtchn : failed to open /dev/xen/evtchn: no such file or directory openat$zygote : failed to open /dev/socket/zygote: no such file or directory pkey_alloc : pkey_alloc(0x0, 0x0) failed: no space left on device read$smackfs_access : smack is not enabled read$smackfs_cipsonum : smack is not enabled read$smackfs_logging : smack is not enabled read$smackfs_ptrace : smack is not enabled set_thread_area : syscall set_thread_area is not present setxattr$security_selinux : selinux is not enabled setxattr$security_smack_transmute : smack is not enabled setxattr$smack_xattr_label : smack is not enabled socket$hf : socket$hf(0x13, 0x2, 0x0) failed: address family not supported by protocol socket$inet6_dccp : socket$inet6_dccp(0xa, 0x6, 0x0) failed: socket type not supported socket$inet_dccp : socket$inet_dccp(0x2, 0x6, 0x0) failed: socket type not supported socket$vsock_dgram : socket$vsock_dgram(0x28, 0x2, 0x0) failed: no such device syz_btf_id_by_name$bpf_lsm : failed to open /sys/kernel/btf/vmlinux: no such file or directory syz_init_net_socket$bt_cmtp : syz_init_net_socket$bt_cmtp(0x1f, 0x3, 0x5) failed: protocol not supported syz_kvm_setup_cpu$ppc64 : unsupported arch syz_mount_image$ntfs : /proc/filesystems does not contain ntfs syz_mount_image$reiserfs : /proc/filesystems does not contain reiserfs syz_mount_image$sysv : /proc/filesystems does not contain sysv syz_mount_image$v7 : /proc/filesystems does not contain v7 syz_open_dev$dricontrol : failed to open /dev/dri/controlD#: no such file or directory syz_open_dev$drirender : failed to open /dev/dri/renderD#: no such file or directory syz_open_dev$floppy : failed to open /dev/fd#: no such file or directory syz_open_dev$ircomm : failed to open /dev/ircomm#: no such file or directory syz_open_dev$sndhw : failed to open /dev/snd/hwC#D#: no such file or directory syz_pkey_set : pkey_alloc(0x0, 0x0) failed: no space left on device uselib : syscall uselib is not present write$selinux_access : selinux is not enabled write$selinux_attr : selinux is not enabled write$selinux_context : selinux is not enabled write$selinux_create : selinux is not enabled write$selinux_load : selinux is not enabled write$selinux_user : selinux is not enabled write$selinux_validatetrans : selinux is not enabled write$smack_current : smack is not enabled write$smackfs_access : smack is not enabled write$smackfs_change_rule : smack is not enabled write$smackfs_cipso : smack is not enabled write$smackfs_cipsonum : smack is not enabled write$smackfs_ipv6host : smack is not enabled write$smackfs_label : smack is not enabled write$smackfs_labels_list : smack is not enabled write$smackfs_load : smack is not enabled write$smackfs_logging : 
smack is not enabled write$smackfs_netlabel : smack is not enabled write$smackfs_ptrace : smack is not enabled
transitively disabled the following syscalls (missing resource [creating syscalls]):
bind$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] close$ibv_device : fd_rdma [openat$uverbs0] connect$hf : sock_hf [socket$hf] connect$vsock_dgram : sock_vsock_dgram [socket$vsock_dgram] getsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] getsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] getsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] getsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] ioctl$ACPI_THERMAL_GET_ART : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_ART_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_COUNT : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ACPI_THERMAL_GET_TRT_LEN : fd_acpi_thermal_rel [openat$acpi_thermal_rel] ioctl$ASHMEM_GET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PIN_STATUS : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_GET_SIZE : fd_ashmem [openat$ashmem] ioctl$ASHMEM_PURGE_ALL_CACHES : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_NAME : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_PROT_MASK : fd_ashmem [openat$ashmem] ioctl$ASHMEM_SET_SIZE : fd_ashmem [openat$ashmem] ioctl$CAPI_CLR_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_ERRCODE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_FLAGS : fd_capi20 [openat$capi20] ioctl$CAPI_GET_MANUFACTURER : fd_capi20 [openat$capi20] ioctl$CAPI_GET_PROFILE : fd_capi20 [openat$capi20] ioctl$CAPI_GET_SERIAL : fd_capi20 [openat$capi20] ioctl$CAPI_INSTALLED : fd_capi20 [openat$capi20] ioctl$CAPI_MANUFACTURER_CMD : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_GETUNIT : fd_capi20 [openat$capi20] ioctl$CAPI_NCCI_OPENCOUNT : fd_capi20 [openat$capi20] ioctl$CAPI_REGISTER : fd_capi20 [openat$capi20] ioctl$CAPI_SET_FLAGS : fd_capi20 [openat$capi20] ioctl$CREATE_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DESTROY_COUNTERS : fd_rdma [openat$uverbs0] ioctl$DRM_IOCTL_I915_GEM_BUSY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2 : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_EXECBUFFER2_WR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_APERTURE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_GET_TILING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MADVISE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_GTT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_MMAP_OFFSET : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PREAD : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_PWRITE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_CACHING : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_DOMAIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_SET_TILING : fd_i915 [openat$i915]
ioctl$DRM_IOCTL_I915_GEM_SW_FINISH : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_THROTTLE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_UNPIN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_USERPTR : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_CREATE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_VM_DESTROY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GEM_WAIT : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GETPARAM : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_GET_RESET_STATS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_ATTRS : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_OVERLAY_PUT_IMAGE : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_ADD_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_OPEN : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_PERF_REMOVE_CONFIG : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_QUERY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_REG_READ : fd_i915 [openat$i915] ioctl$DRM_IOCTL_I915_SET_SPRITE_COLORKEY : fd_i915 [openat$i915] ioctl$DRM_IOCTL_MSM_GEM_CPU_FINI : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_CPU_PREP : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_INFO : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_MADVISE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GEM_SUBMIT : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_GET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SET_PARAM : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_NEW : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_SUBMITQUEUE_QUERY : fd_msm [openat$msm] ioctl$DRM_IOCTL_MSM_WAIT_FENCE : fd_msm [openat$msm] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPEXEC: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_HTBUFFER_HTBLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_CHANGESPARSEMEM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMREXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRGETUID: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_PVRTL_TLWRITEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2: fd_rogue [openat$img_rogue] 
ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY: 
fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_CONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DISCONNECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE: fd_rogue [openat$img_rogue] ioctl$DRM_IOCTL_PVR_SRVKM_CMD_PVRSRV_BRIDGE_SYNC_SYNCPRIMSET: fd_rogue [openat$img_rogue] ioctl$FLOPPY_FDCLRPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDDEFPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDEJECT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFLUSH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTBEG : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTEND : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDFMTTRK : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETDRVTYP : fd_floppy [syz_open_dev$floppy] 
ioctl$FLOPPY_FDGETFDCSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDGETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGOFF : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDMSGON : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDPOLLDRVSTAT : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRAWCMD : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDRESET : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETDRVPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETEMSGTRESH : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETMAXERRS : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDSETPRM : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDTWADDLE : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORCLR : fd_floppy [syz_open_dev$floppy] ioctl$FLOPPY_FDWERRORGET : fd_floppy [syz_open_dev$floppy] ioctl$KBASE_HWCNT_READER_CLEAR : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DISABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_DUMP : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_ENABLE_EVENT : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_SIZE : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_GET_HWVER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES: fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_HWCNT_READER_SET_INTERVAL : fd_hwcnt [ioctl$KBASE_IOCTL_HWCNT_READER_SETUP] ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_BIND : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_KICK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_DISJOINT_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_FENCE_VALIDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_CONTEXT_ID : fd_bifrost [openat$bifrost openat$mali] 
ioctl$KBASE_IOCTL_GET_CPU_GPU_TIMEINFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_DDK_VERSION : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_GET_GPUPROPS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_CLEAR : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_DUMP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_ENABLE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_READER_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_HWCNT_SET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_JOB_SUBMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_CMD : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE : fd_kinstr [ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP] ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALIAS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_ALLOC_EX : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_COMMIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_EXEC_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_CPU_OFFSET : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET: fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FLAGS_CHANGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_FREE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_IMPORT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_10_2 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_JIT_INIT_11_5 : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_PROFILE_ADD : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_QUERY : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_MEM_SYNC : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_POST_TERM : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_READ_USER_PAGE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_FLAGS : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_SOFT_EVENT_UPDATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_MAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STICKY_RESOURCE_UNMAP : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_STREAM_CREATE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_ACQUIRE : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_TLSTREAM_FLUSH : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK : fd_bifrost [openat$bifrost openat$mali] ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED : fd_bifrost [openat$bifrost openat$mali] ioctl$KVM_ASSIGN_SET_MSIX_ENTRY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_ASSIGN_SET_MSIX_NR : 
fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DIRTY_LOG_RING_ACQ_REL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_DISABLE_QUIRKS2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_ENFORCE_PV_FEATURE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_EXCEPTION_PAYLOAD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_HYPERCALL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_EXIT_ON_EMULATION_FAILURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HALT_POLL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_DIRECT_TLBFLUSH : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENFORCE_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_ENLIGHTENED_VMCS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SEND_IPI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_SYNIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_SYNIC2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_HYPERV_TLBFLUSH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_HYPERV_VP_INDEX : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MAX_VCPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MEMORY_FAULT_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_MSR_PLATFORM_INFO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PMU_CAPABILITY : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_PTP_KVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SGX_ATTRIBUTE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SPLIT_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_STEAL_TIME : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_SYNC_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_CAP_VM_COPY_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_DISABLE_NX_HUGE_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_VM_TYPES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X2APIC_API : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_APIC_BUS_CYCLES_NS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_BUS_LOCK_EXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_DISABLE_EXITS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_GUEST_MODE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_NOTIFY_VMEXIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_X86_USER_SPACE_MSR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CAP_XEN_HVM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CHECK_EXTENSION : fd_kvm [openat$kvm] ioctl$KVM_CHECK_EXTENSION_VM : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CLEAR_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_DEVICE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_GUEST_MEMFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VCPU : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_CREATE_VM : fd_kvm [openat$kvm] ioctl$KVM_DIRTY_TLB : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_API_VERSION : fd_kvm [openat$kvm] ioctl$KVM_GET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] 
ioctl$KVM_GET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_GET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_DIRTY_LOG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_EMULATED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_MSRS_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_FEATURE_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_MSR_INDEX_LIST : fd_kvm [openat$kvm] ioctl$KVM_GET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_REG_LIST : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_STATS_FD_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_SUPPORTED_CPUID : fd_kvm [openat$kvm] ioctl$KVM_GET_SUPPORTED_HV_CPUID_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_SUPPORTED_HV_CPUID_sys : fd_kvm [openat$kvm] ioctl$KVM_GET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_GET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_VCPU_MMAP_SIZE : fd_kvm [openat$kvm] ioctl$KVM_GET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_GET_XSAVE2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_HAS_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_HAS_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_HYPERV_EVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_INTERRUPT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_IOEVENTFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQFD : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_IRQ_LINE_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_KVMCLOCK_CTRL : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_MEMORY_ENCRYPT_REG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_MEMORY_ENCRYPT_UNREG_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_NMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_PPC_ALLOCATE_HTAB : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_PRE_FAULT_MEMORY : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_REGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_REINJECT_CONTROL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RESET_DIRTY_RINGS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_RUN : fd_kvmcpu 
[ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_S390_VCPU_FAULT : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_BOOT_CPU_ID : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CLOCK : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_CPUID : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_CPUID2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEBUGREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR : fd_kvmdev [ioctl$KVM_CREATE_DEVICE] ioctl$KVM_SET_DEVICE_ATTR_vcpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_DEVICE_ATTR_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_FPU : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_GSI_ROUTING : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_GUEST_DEBUG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_IDENTITY_MAP_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_IRQCHIP : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_LAPIC : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MEMORY_ATTRIBUTES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_MP_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_MSRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NESTED_STATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_NR_MMU_PAGES : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_ONE_REG : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_PIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_PIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_REGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SIGNAL_MASK : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_SREGS2 : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_cpu : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_TSC_KHZ_vm : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_TSS_ADDR : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_USER_MEMORY_REGION2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SET_VAPIC_ADDR : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_VCPU_EVENTS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XCRS : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SET_XSAVE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_SEV_CERT_EXPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_DECRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_DBG_ENCRYPT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_ES_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GET_ATTESTATION_REPORT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_GUEST_STATUS : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_INIT2 : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_MEASURE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_SECRET : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_LAUNCH_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_START : 
fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_RECEIVE_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_CANCEL : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_DATA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SEND_UPDATE_VMSA : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_FINISH : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_START : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SEV_SNP_LAUNCH_UPDATE : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SIGNAL_MSI : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_SMI : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TPR_ACCESS_REPORTING : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_TRANSLATE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_UNREGISTER_COALESCED_MMIO : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_X86_GET_MCE_CAP_SUPPORTED : fd_kvm [openat$kvm] ioctl$KVM_X86_SETUP_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MCE : fd_kvmcpu [ioctl$KVM_CREATE_VCPU syz_kvm_add_vcpu$x86] ioctl$KVM_X86_SET_MSR_FILTER : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$KVM_XEN_HVM_CONFIG : fd_kvmvm [ioctl$KVM_CREATE_VM] ioctl$PERF_EVENT_IOC_DISABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ENABLE : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_ID : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_MODIFY_ATTRIBUTES : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PAUSE_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_PERIOD : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_QUERY_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_REFRESH : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_RESET : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_BPF : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_FILTER : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$PERF_EVENT_IOC_SET_OUTPUT : fd_perf [perf_event_open perf_event_open$cgroup] ioctl$READ_COUNTERS : fd_rdma [openat$uverbs0] ioctl$SNDRV_FIREWIRE_IOCTL_GET_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_LOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_TASCAM_STATE : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_FIREWIRE_IOCTL_UNLOCK : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_LOAD : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_DSP_STATUS : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_INFO : fd_snd_hw [syz_open_dev$sndhw] ioctl$SNDRV_HWDEP_IOCTL_PVERSION : fd_snd_hw [syz_open_dev$sndhw] ioctl$TE_IOCTL_CLOSE_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_LAUNCH_OPERATION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_OPEN_CLIENT_SESSION : fd_tlk [openat$tlk_device] ioctl$TE_IOCTL_SS_CMD : fd_tlk [openat$tlk_device] ioctl$TIPC_IOC_CONNECT : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] 
ioctl$TIPC_IOC_CONNECT_avb : fd_trusty_avb [openat$trusty_avb] ioctl$TIPC_IOC_CONNECT_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] ioctl$TIPC_IOC_CONNECT_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] ioctl$TIPC_IOC_CONNECT_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] ioctl$TIPC_IOC_CONNECT_keymaster_secure : fd_trusty_km_secure [openat$trusty_km_secure] ioctl$TIPC_IOC_CONNECT_km : fd_trusty_km [openat$trusty_km] ioctl$TIPC_IOC_CONNECT_storage : fd_trusty_storage [openat$trusty_storage] ioctl$VFIO_CHECK_EXTENSION : fd_vfio [openat$vfio] ioctl$VFIO_GET_API_VERSION : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_GET_INFO : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_MAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_IOMMU_UNMAP_DMA : fd_vfio [openat$vfio] ioctl$VFIO_SET_IOMMU : fd_vfio [openat$vfio] ioctl$VTPM_PROXY_IOC_NEW_DEV : fd_vtpm [openat$vtpm] ioctl$sock_bt_cmtp_CMTPCONNADD : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPCONNDEL : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNINFO : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] ioctl$sock_bt_cmtp_CMTPGETCONNLIST : sock_bt_cmtp [syz_init_net_socket$bt_cmtp] mmap$DRM_I915 : fd_i915 [openat$i915] mmap$DRM_MSM : fd_msm [openat$msm] mmap$KVM_VCPU : vcpu_mmap_size [ioctl$KVM_GET_VCPU_MMAP_SIZE] mmap$bifrost : fd_bifrost [openat$bifrost openat$mali] mmap$perf : fd_perf [perf_event_open perf_event_open$cgroup] pkey_free : pkey [pkey_alloc] pkey_mprotect : pkey [pkey_alloc] read$sndhw : fd_snd_hw [syz_open_dev$sndhw] read$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] recvmsg$hf : sock_hf [socket$hf] sendmsg$hf : sock_hf [socket$hf] setsockopt$inet6_dccp_buf : sock_dccp6 [socket$inet6_dccp] setsockopt$inet6_dccp_int : sock_dccp6 [socket$inet6_dccp] setsockopt$inet_dccp_buf : sock_dccp [socket$inet_dccp] setsockopt$inet_dccp_int : sock_dccp [socket$inet_dccp] syz_kvm_add_vcpu$x86 : kvm_syz_vm$x86 [syz_kvm_setup_syzos_vm$x86] syz_kvm_assert_syzos_uexit$x86 : kvm_run_ptr [mmap$KVM_VCPU] syz_kvm_setup_cpu$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_kvm_setup_syzos_vm$x86 : fd_kvmvm [ioctl$KVM_CREATE_VM] syz_memcpy_off$KVM_EXIT_HYPERCALL : kvm_run_ptr [mmap$KVM_VCPU] syz_memcpy_off$KVM_EXIT_MMIO : kvm_run_ptr [mmap$KVM_VCPU] write$ALLOC_MW : fd_rdma [openat$uverbs0] write$ALLOC_PD : fd_rdma [openat$uverbs0] write$ATTACH_MCAST : fd_rdma [openat$uverbs0] write$CLOSE_XRCD : fd_rdma [openat$uverbs0] write$CREATE_AH : fd_rdma [openat$uverbs0] write$CREATE_COMP_CHANNEL : fd_rdma [openat$uverbs0] write$CREATE_CQ : fd_rdma [openat$uverbs0] write$CREATE_CQ_EX : fd_rdma [openat$uverbs0] write$CREATE_FLOW : fd_rdma [openat$uverbs0] write$CREATE_QP : fd_rdma [openat$uverbs0] write$CREATE_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$CREATE_SRQ : fd_rdma [openat$uverbs0] write$CREATE_WQ : fd_rdma [openat$uverbs0] write$DEALLOC_MW : fd_rdma [openat$uverbs0] write$DEALLOC_PD : fd_rdma [openat$uverbs0] write$DEREG_MR : fd_rdma [openat$uverbs0] write$DESTROY_AH : fd_rdma [openat$uverbs0] write$DESTROY_CQ : fd_rdma [openat$uverbs0] write$DESTROY_FLOW : fd_rdma [openat$uverbs0] write$DESTROY_QP : fd_rdma [openat$uverbs0] write$DESTROY_RWQ_IND_TBL : fd_rdma [openat$uverbs0] write$DESTROY_SRQ : fd_rdma [openat$uverbs0] write$DESTROY_WQ : fd_rdma [openat$uverbs0] write$DETACH_MCAST : fd_rdma [openat$uverbs0] write$MLX5_ALLOC_PD : fd_rdma [openat$uverbs0] write$MLX5_CREATE_CQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_DV_QP : fd_rdma [openat$uverbs0] write$MLX5_CREATE_QP : fd_rdma [openat$uverbs0] 
write$MLX5_CREATE_SRQ : fd_rdma [openat$uverbs0] write$MLX5_CREATE_WQ : fd_rdma [openat$uverbs0] write$MLX5_GET_CONTEXT : fd_rdma [openat$uverbs0] write$MLX5_MODIFY_WQ : fd_rdma [openat$uverbs0] write$MODIFY_QP : fd_rdma [openat$uverbs0] write$MODIFY_SRQ : fd_rdma [openat$uverbs0] write$OPEN_XRCD : fd_rdma [openat$uverbs0] write$POLL_CQ : fd_rdma [openat$uverbs0] write$POST_RECV : fd_rdma [openat$uverbs0] write$POST_SEND : fd_rdma [openat$uverbs0] write$POST_SRQ_RECV : fd_rdma [openat$uverbs0] write$QUERY_DEVICE_EX : fd_rdma [openat$uverbs0] write$QUERY_PORT : fd_rdma [openat$uverbs0] write$QUERY_QP : fd_rdma [openat$uverbs0] write$QUERY_SRQ : fd_rdma [openat$uverbs0] write$REG_MR : fd_rdma [openat$uverbs0] write$REQ_NOTIFY_CQ : fd_rdma [openat$uverbs0] write$REREG_MR : fd_rdma [openat$uverbs0] write$RESIZE_CQ : fd_rdma [openat$uverbs0] write$capi20 : fd_capi20 [openat$capi20] write$capi20_data : fd_capi20 [openat$capi20] write$damon_attrs : fd_damon_attrs [openat$damon_attrs] write$damon_contexts : fd_damon_contexts [openat$damon_mk_contexts openat$damon_rm_contexts] write$damon_init_regions : fd_damon_init_regions [openat$damon_init_regions] write$damon_monitor_on : fd_damon_monitor_on [openat$damon_monitor_on] write$damon_schemes : fd_damon_schemes [openat$damon_schemes] write$damon_target_ids : fd_damon_target_ids [openat$damon_target_ids] write$proc_reclaim : fd_proc_reclaim [openat$proc_reclaim] write$sndhw : fd_snd_hw [syz_open_dev$sndhw] write$sndhw_fireworks : fd_snd_hw [syz_open_dev$sndhw] write$trusty : fd_trusty [openat$trusty openat$trusty_avb openat$trusty_gatekeeper ...] write$trusty_avb : fd_trusty_avb [openat$trusty_avb] write$trusty_gatekeeper : fd_trusty_gatekeeper [openat$trusty_gatekeeper] write$trusty_hwkey : fd_trusty_hwkey [openat$trusty_hwkey] write$trusty_hwrng : fd_trusty_hwrng [openat$trusty_hwrng] write$trusty_km : fd_trusty_km [openat$trusty_km] write$trusty_km_secure : fd_trusty_km_secure [openat$trusty_km_secure] write$trusty_storage : fd_trusty_storage [openat$trusty_storage] BinFmtMisc : enabled Comparisons : enabled Coverage : enabled DelayKcovMmap : enabled DevlinkPCI : PCI device 0000:00:10.0 is not available ExtraCoverage : enabled Fault : enabled KCSAN : write(/sys/kernel/debug/kcsan, on) failed KcovResetIoctl : kernel does not support ioctl(KCOV_RESET_TRACE) LRWPANEmulation : enabled Leak : failed to write(kmemleak, "scan=off") NetDevices : enabled NetInjection : enabled NicVF : PCI device 0000:00:11.0 is not available SandboxAndroid : setfilecon: setxattr failed. (errno 1: Operation not permitted). . process exited with status 67. 
SandboxNamespace : enabled SandboxNone : enabled SandboxSetuid : enabled Swap : enabled USBEmulation : enabled VhciInjection : enabled WifiEmulation : enabled syscalls : 3838/8054 2025/08/27 17:20:45 base: machine check complete 2025/08/27 17:23:39 base crash "KASAN: slab-use-after-free Read in xfrm_alloc_spi" is already known 2025/08/27 17:23:39 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:24:11 base crash "general protection fault in pcl818_ai_cancel" is already known 2025/08/27 17:24:11 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/08/27 17:24:23 base crash: general protection fault in pcl818_ai_cancel 2025/08/27 17:24:27 runner 5 connected 2025/08/27 17:24:29 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/08/27 17:24:38 STAT { "buffer too small": 0, "candidate triage jobs": 48, "candidates": 74394, "comps overflows": 0, "corpus": 5538, "corpus [files]": 0, "corpus [symbols]": 4214, "cover overflows": 4071, "coverage": 173625, "distributor delayed": 5174, "distributor undelayed": 5172, "distributor violated": 1, "exec candidate": 5645, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 11469, "exec total [new]": 26107, "exec triage": 17942, "executor restarts [base]": 59, "executor restarts [new]": 97, "fault jobs": 0, "fuzzer jobs": 48, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 177123, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 5645, "no exec duration": 42293000000, "no exec requests": 322, "pending": 0, "prog exec time": 302, "reproducing": 0, "rpc recv": 1322076720, "rpc sent": 129665128, "signal": 170550, "smash jobs": 0, "triage jobs": 0, "vm output": 2178170, "vm restarts [base]": 4, "vm restarts [new]": 11 } 2025/08/27 17:25:01 runner 4 connected 2025/08/27 17:25:03 patched crashed: INFO: rcu detected stall in worker_thread [need repro = false] 2025/08/27 17:25:12 runner 0 connected 2025/08/27 17:25:18 runner 9 connected 2025/08/27 17:25:24 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/27 17:25:50 base crash "KASAN: slab-use-after-free Read in __xfrm_state_lookup" is already known 2025/08/27 17:25:50 patched crashed: KASAN: slab-use-after-free Read in __xfrm_state_lookup [need repro = false] 2025/08/27 17:25:59 runner 0 connected 2025/08/27 17:26:00 base crash "KASAN: slab-use-after-free Read in jfs_lazycommit" is already known 2025/08/27 17:26:00 patched crashed: KASAN: slab-use-after-free Read in jfs_lazycommit [need repro = false] 2025/08/27 17:26:12 runner 2 connected 2025/08/27 17:26:34 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:26:39 runner 6 connected 2025/08/27 17:26:56 runner 9 connected 2025/08/27 17:27:02 base crash: WARNING in io_ring_exit_work 2025/08/27 17:27:30 runner 8 connected 2025/08/27 17:27:51 runner 3 connected 2025/08/27 17:28:11 base crash: general protection fault in pcl818_ai_cancel 2025/08/27 17:28:40 base crash "kernel BUG in jfs_evict_inode" is already known 2025/08/27 17:28:40 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 17:29:08 runner 1 
connected 2025/08/27 17:29:38 STAT { "buffer too small": 0, "candidate triage jobs": 58, "candidates": 67839, "comps overflows": 0, "corpus": 12031, "corpus [files]": 0, "corpus [symbols]": 7871, "cover overflows": 8428, "coverage": 217419, "distributor delayed": 11155, "distributor undelayed": 11155, "distributor violated": 1, "exec candidate": 12200, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 22023, "exec total [new]": 56658, "exec triage": 38423, "executor restarts [base]": 83, "executor restarts [new]": 142, "fault jobs": 0, "fuzzer jobs": 58, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 9, "hints jobs": 0, "max signal": 220419, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 12200, "no exec duration": 42459000000, "no exec requests": 324, "pending": 0, "prog exec time": 203, "reproducing": 0, "rpc recv": 2506656776, "rpc sent": 280101016, "signal": 212882, "smash jobs": 0, "triage jobs": 0, "vm output": 5084131, "vm restarts [base]": 8, "vm restarts [new]": 17 } 2025/08/27 17:29:38 runner 1 connected 2025/08/27 17:30:01 base crash "unregister_netdevice: waiting for DEV to become free" is already known 2025/08/27 17:30:01 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 17:30:57 runner 2 connected 2025/08/27 17:31:29 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:31:29 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:32:12 base crash "INFO: trying to register non-static key in ocfs2_dlm_shutdown" is already known 2025/08/27 17:32:12 patched crashed: INFO: trying to register non-static key in ocfs2_dlm_shutdown [need repro = false] 2025/08/27 17:32:19 runner 8 connected 2025/08/27 17:32:56 base crash "unregister_netdevice: waiting for DEV to become free" is already known 2025/08/27 17:32:56 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 17:33:05 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:33:05 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:33:08 runner 0 connected 2025/08/27 17:33:20 base crash "unregister_netdevice: waiting for DEV to become free" is already known 2025/08/27 17:33:20 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 17:33:28 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/08/27 17:33:28 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 17:33:30 base crash "unregister_netdevice: waiting for DEV to become free" is already known 2025/08/27 17:33:30 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 17:33:53 runner 5 connected 2025/08/27 17:33:54 runner 6 connected 2025/08/27 17:34:10 runner 3 connected 2025/08/27 17:34:17 runner 1 connected 2025/08/27 17:34:19 runner 9 connected 2025/08/27 17:34:38 STAT { "buffer too small": 0, "candidate triage jobs": 55, "candidates": 62973, "comps overflows": 0, "corpus": 16836, "corpus [files]": 0, "corpus [symbols]": 10430, "cover overflows": 11481, "coverage": 239246, "distributor delayed": 15929, 
"distributor undelayed": 15929, "distributor violated": 5, "exec candidate": 17066, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 34152, "exec total [new]": 79089, "exec triage": 53314, "executor restarts [base]": 114, "executor restarts [new]": 242, "fault jobs": 0, "fuzzer jobs": 55, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 10, "hints jobs": 0, "max signal": 241816, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 17066, "no exec duration": 42529000000, "no exec requests": 326, "pending": 0, "prog exec time": 190, "reproducing": 0, "rpc recv": 3631610312, "rpc sent": 440344080, "signal": 234487, "smash jobs": 0, "triage jobs": 0, "vm output": 8839922, "vm restarts [base]": 8, "vm restarts [new]": 26 } 2025/08/27 17:34:59 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:34:59 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:35:00 base crash "possible deadlock in ocfs2_try_remove_refcount_tree" is already known 2025/08/27 17:35:00 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 17:35:02 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 17:35:10 base crash "possible deadlock in ocfs2_reserve_local_alloc_bits" is already known 2025/08/27 17:35:10 patched crashed: possible deadlock in ocfs2_reserve_local_alloc_bits [need repro = false] 2025/08/27 17:35:21 base crash "possible deadlock in ocfs2_reserve_local_alloc_bits" is already known 2025/08/27 17:35:21 patched crashed: possible deadlock in ocfs2_reserve_local_alloc_bits [need repro = false] 2025/08/27 17:35:42 base crash "possible deadlock in ocfs2_init_acl" is already known 2025/08/27 17:35:42 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 17:35:49 runner 4 connected 2025/08/27 17:35:50 runner 0 connected 2025/08/27 17:35:55 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/08/27 17:35:55 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/27 17:35:55 runner 7 connected 2025/08/27 17:35:56 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 17:35:59 runner 2 connected 2025/08/27 17:36:10 runner 0 connected 2025/08/27 17:36:31 runner 8 connected 2025/08/27 17:36:38 base crash "kernel BUG in jfs_evict_inode" is already known 2025/08/27 17:36:38 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 17:36:45 runner 1 connected 2025/08/27 17:36:45 runner 2 connected 2025/08/27 17:37:27 runner 5 connected 2025/08/27 17:38:29 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 17:38:40 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:38:40 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:39:19 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:39:19 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:39:25 runner 2 connected 2025/08/27 17:39:30 runner 5 connected 2025/08/27 17:39:38 STAT { "buffer too small": 0, "candidate triage jobs": 40, "candidates": 57926, "comps overflows": 0, "corpus": 21834, "corpus 
[files]": 0, "corpus [symbols]": 13088, "cover overflows": 14531, "coverage": 256131, "distributor delayed": 20695, "distributor undelayed": 20693, "distributor violated": 5, "exec candidate": 22113, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 0, "exec seeds": 0, "exec smash": 0, "exec total [base]": 44495, "exec total [new]": 103154, "exec triage": 68690, "executor restarts [base]": 142, "executor restarts [new]": 335, "fault jobs": 0, "fuzzer jobs": 40, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 258518, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 22113, "no exec duration": 47964000000, "no exec requests": 336, "pending": 0, "prog exec time": 246, "reproducing": 0, "rpc recv": 4740427896, "rpc sent": 566725984, "signal": 251291, "smash jobs": 0, "triage jobs": 0, "vm output": 12658884, "vm restarts [base]": 11, "vm restarts [new]": 34 } 2025/08/27 17:39:58 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:39:58 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:40:06 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 17:40:06 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 17:40:08 runner 8 connected 2025/08/27 17:40:24 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:40:26 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:40:36 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:40:47 runner 0 connected 2025/08/27 17:40:50 base crash: general protection fault in device_move 2025/08/27 17:40:55 runner 9 connected 2025/08/27 17:41:13 runner 2 connected 2025/08/27 17:41:15 runner 7 connected 2025/08/27 17:41:21 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:41:25 runner 3 connected 2025/08/27 17:41:37 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/27 17:41:38 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:41:39 runner 1 connected 2025/08/27 17:41:46 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:42:07 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:42:11 runner 6 connected 2025/08/27 17:42:27 runner 3 connected 2025/08/27 17:42:27 runner 1 connected 2025/08/27 17:42:36 runner 0 connected 2025/08/27 17:42:57 runner 9 connected 2025/08/27 17:44:38 STAT { "buffer too small": 0, "candidate triage jobs": 43, "candidates": 52602, "comps overflows": 0, "corpus": 27086, "corpus [files]": 0, "corpus [symbols]": 15588, "cover overflows": 18310, "coverage": 271606, "distributor delayed": 26186, "distributor undelayed": 26186, "distributor violated": 8, "exec candidate": 27437, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 57664, "exec total [new]": 131496, "exec triage": 85093, "executor restarts [base]": 166, "executor restarts [new]": 414, "fault jobs": 0, "fuzzer jobs": 43, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 10, "hints 
jobs": 0, "max signal": 273868, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 27437, "no exec duration": 47998000000, "no exec requests": 338, "pending": 0, "prog exec time": 177, "reproducing": 0, "rpc recv": 5942783292, "rpc sent": 729091296, "signal": 266646, "smash jobs": 0, "triage jobs": 0, "vm output": 15707549, "vm restarts [base]": 13, "vm restarts [new]": 44 } 2025/08/27 17:45:18 patched crashed: possible deadlock in __netdev_update_features [need repro = true] 2025/08/27 17:45:18 scheduled a reproduction of 'possible deadlock in __netdev_update_features' 2025/08/27 17:46:08 runner 4 connected 2025/08/27 17:47:08 base crash "KASAN: slab-use-after-free Read in xfrm_state_find" is already known 2025/08/27 17:47:08 patched crashed: KASAN: slab-use-after-free Read in xfrm_state_find [need repro = false] 2025/08/27 17:47:57 runner 8 connected 2025/08/27 17:47:59 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:48:22 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/27 17:48:55 runner 0 connected 2025/08/27 17:49:11 base crash "WARNING in xfrm6_tunnel_net_exit" is already known 2025/08/27 17:49:11 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:49:11 runner 3 connected 2025/08/27 17:49:25 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:49:38 STAT { "buffer too small": 0, "candidate triage jobs": 35, "candidates": 46664, "comps overflows": 0, "corpus": 32907, "corpus [files]": 0, "corpus [symbols]": 18149, "cover overflows": 22662, "coverage": 284518, "distributor delayed": 31341, "distributor undelayed": 31341, "distributor violated": 8, "exec candidate": 33375, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 71559, "exec total [new]": 166755, "exec triage": 103603, "executor restarts [base]": 187, "executor restarts [new]": 473, "fault jobs": 0, "fuzzer jobs": 35, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 8, "hints jobs": 0, "max signal": 287297, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 33375, "no exec duration": 48358000000, "no exec requests": 341, "pending": 1, "prog exec time": 227, "reproducing": 0, "rpc recv": 6873393460, "rpc sent": 899706168, "signal": 279341, "smash jobs": 0, "triage jobs": 0, "vm output": 18934900, "vm restarts [base]": 14, "vm restarts [new]": 47 } 2025/08/27 17:49:57 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/27 17:50:07 runner 6 connected 2025/08/27 17:50:14 runner 2 connected 2025/08/27 17:50:28 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 17:50:47 runner 1 connected 2025/08/27 17:51:25 runner 4 connected 2025/08/27 17:51:26 base crash "WARNING in xfrm6_tunnel_net_exit" is already known 2025/08/27 17:51:26 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:51:29 base crash "WARNING in xfrm6_tunnel_net_exit" is already known 2025/08/27 17:51:29 patched crashed: WARNING in 
xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:51:44 base crash: general protection fault in pcl818_ai_cancel 2025/08/27 17:52:08 base crash "WARNING in xfrm6_tunnel_net_exit" is already known 2025/08/27 17:52:08 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:52:15 base crash "WARNING in xfrm6_tunnel_net_exit" is already known 2025/08/27 17:52:15 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:52:16 runner 2 connected 2025/08/27 17:52:19 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 17:52:21 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:52:22 runner 7 connected 2025/08/27 17:52:34 runner 0 connected 2025/08/27 17:53:06 runner 0 connected 2025/08/27 17:53:08 runner 2 connected 2025/08/27 17:53:10 runner 4 connected 2025/08/27 17:53:11 runner 3 connected 2025/08/27 17:54:12 base crash "INFO: task hung in corrupted" is already known 2025/08/27 17:54:12 patched crashed: INFO: task hung in corrupted [need repro = false] 2025/08/27 17:54:14 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 17:54:38 STAT { "buffer too small": 0, "candidate triage jobs": 41, "candidates": 42705, "comps overflows": 0, "corpus": 36783, "corpus [files]": 0, "corpus [symbols]": 19873, "cover overflows": 25971, "coverage": 292969, "distributor delayed": 35959, "distributor undelayed": 35959, "distributor violated": 95, "exec candidate": 37334, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 5, "exec seeds": 0, "exec smash": 0, "exec total [base]": 82487, "exec total [new]": 191973, "exec triage": 115858, "executor restarts [base]": 213, "executor restarts [new]": 533, "fault jobs": 0, "fuzzer jobs": 41, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 9, "hints jobs": 0, "max signal": 295927, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 37334, "no exec duration": 48456000000, "no exec requests": 344, "pending": 1, "prog exec time": 218, "reproducing": 0, "rpc recv": 7809199520, "rpc sent": 1052113856, "signal": 287688, "smash jobs": 0, "triage jobs": 0, "vm output": 21935325, "vm restarts [base]": 17, "vm restarts [new]": 55 } 2025/08/27 17:55:00 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:55:03 runner 1 connected 2025/08/27 17:55:08 runner 6 connected 2025/08/27 17:55:15 patched crashed: INFO: task hung in reg_check_chans_work [need repro = true] 2025/08/27 17:55:15 scheduled a reproduction of 'INFO: task hung in reg_check_chans_work' 2025/08/27 17:55:40 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/08/27 17:55:55 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 17:55:57 runner 1 connected 2025/08/27 17:56:04 runner 9 connected 2025/08/27 17:56:37 runner 4 connected 2025/08/27 17:56:40 base crash "kernel BUG in jfs_evict_inode" is already known 2025/08/27 17:56:40 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 17:56:41 base crash "kernel BUG in txUnlock" is already known 2025/08/27 17:56:41 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/27 17:56:42 base crash "kernel BUG in txUnlock" is already known 2025/08/27 17:56:42 patched crashed: kernel BUG in 
txUnlock [need repro = false] 2025/08/27 17:56:42 base crash "kernel BUG in txUnlock" is already known 2025/08/27 17:56:42 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/27 17:56:43 base crash "kernel BUG in txUnlock" is already known 2025/08/27 17:56:43 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/27 17:56:48 base crash: kernel BUG in txUnlock 2025/08/27 17:56:53 runner 5 connected 2025/08/27 17:57:20 base crash: kernel BUG in jfs_evict_inode 2025/08/27 17:57:29 runner 8 connected 2025/08/27 17:57:30 runner 9 connected 2025/08/27 17:57:31 runner 1 connected 2025/08/27 17:57:33 runner 7 connected 2025/08/27 17:57:36 runner 2 connected 2025/08/27 17:57:38 runner 0 connected 2025/08/27 17:58:01 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 17:58:08 runner 1 connected 2025/08/27 17:58:50 runner 3 connected 2025/08/27 17:58:56 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/08/27 17:58:56 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/27 17:59:00 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 17:59:09 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/08/27 17:59:09 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/27 17:59:09 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 17:59:18 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 17:59:38 STAT { "buffer too small": 0, "candidate triage jobs": 22, "candidates": 39794, "comps overflows": 0, "corpus": 39676, "corpus [files]": 0, "corpus [symbols]": 21399, "cover overflows": 27924, "coverage": 299355, "distributor delayed": 39385, "distributor undelayed": 39385, "distributor violated": 96, "exec candidate": 40245, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 9, "exec seeds": 0, "exec smash": 0, "exec total [base]": 86935, "exec total [new]": 210251, "exec triage": 124766, "executor restarts [base]": 246, "executor restarts [new]": 608, "fault jobs": 0, "fuzzer jobs": 22, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 302248, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 40245, "no exec duration": 48687000000, "no exec requests": 346, "pending": 2, "prog exec time": 343, "reproducing": 0, "rpc recv": 8663737872, "rpc sent": 1187922816, "signal": 294082, "smash jobs": 0, "triage jobs": 0, "vm output": 25792652, "vm restarts [base]": 21, "vm restarts [new]": 65 } 2025/08/27 17:59:45 runner 3 connected 2025/08/27 17:59:51 runner 1 connected 2025/08/27 17:59:58 runner 8 connected 2025/08/27 17:59:58 runner 9 connected 2025/08/27 18:00:07 runner 1 connected 2025/08/27 18:00:45 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 18:01:27 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:01:34 runner 7 connected 2025/08/27 18:02:24 runner 9 connected 2025/08/27 18:03:53 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:04:01 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:04:34 patched crashed: kernel BUG in 
txUnlock [need repro = false] 2025/08/27 18:04:36 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/27 18:04:38 STAT { "buffer too small": 0, "candidate triage jobs": 19, "candidates": 37728, "comps overflows": 0, "corpus": 41669, "corpus [files]": 0, "corpus [symbols]": 22493, "cover overflows": 31980, "coverage": 304065, "distributor delayed": 41146, "distributor undelayed": 41146, "distributor violated": 96, "exec candidate": 42311, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 10, "exec seeds": 0, "exec smash": 0, "exec total [base]": 94965, "exec total [new]": 235169, "exec triage": 131227, "executor restarts [base]": 271, "executor restarts [new]": 687, "fault jobs": 0, "fuzzer jobs": 19, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 6, "hints jobs": 0, "max signal": 307215, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 42311, "no exec duration": 48697000000, "no exec requests": 347, "pending": 2, "prog exec time": 341, "reproducing": 0, "rpc recv": 9350998528, "rpc sent": 1344404848, "signal": 298688, "smash jobs": 0, "triage jobs": 0, "vm output": 29734075, "vm restarts [base]": 22, "vm restarts [new]": 71 } 2025/08/27 18:04:42 runner 9 connected 2025/08/27 18:04:51 runner 4 connected 2025/08/27 18:05:07 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:05:23 runner 1 connected 2025/08/27 18:05:25 runner 7 connected 2025/08/27 18:05:56 runner 6 connected 2025/08/27 18:07:24 base crash "WARNING in dbAdjTree" is already known 2025/08/27 18:07:24 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:07:25 base crash "WARNING in dbAdjTree" is already known 2025/08/27 18:07:25 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:07:26 base crash "WARNING in dbAdjTree" is already known 2025/08/27 18:07:26 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:07:27 base crash "WARNING in dbAdjTree" is already known 2025/08/27 18:07:27 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:07:36 base crash "possible deadlock in ocfs2_xattr_set" is already known 2025/08/27 18:07:36 patched crashed: possible deadlock in ocfs2_xattr_set [need repro = false] 2025/08/27 18:08:14 runner 8 connected 2025/08/27 18:08:16 runner 6 connected 2025/08/27 18:08:22 runner 3 connected 2025/08/27 18:08:23 runner 7 connected 2025/08/27 18:08:25 runner 2 connected 2025/08/27 18:08:57 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/27 18:09:38 STAT { "buffer too small": 0, "candidate triage jobs": 22, "candidates": 35963, "comps overflows": 0, "corpus": 43354, "corpus [files]": 0, "corpus [symbols]": 23393, "cover overflows": 35415, "coverage": 307775, "distributor delayed": 43085, "distributor undelayed": 43085, "distributor violated": 110, "exec candidate": 44076, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 11, "exec seeds": 0, "exec smash": 0, "exec total [base]": 104412, "exec total [new]": 256424, "exec triage": 136797, "executor restarts [base]": 290, "executor restarts [new]": 752, "fault jobs": 0, "fuzzer jobs": 22, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 10, "hints jobs": 0, "max 
signal": 311151, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 44076, "no exec duration": 48865000000, "no exec requests": 350, "pending": 2, "prog exec time": 259, "reproducing": 0, "rpc recv": 10088362828, "rpc sent": 1516466984, "signal": 302438, "smash jobs": 0, "triage jobs": 0, "vm output": 32642195, "vm restarts [base]": 22, "vm restarts [new]": 81 } 2025/08/27 18:09:46 runner 1 connected 2025/08/27 18:10:53 base crash: WARNING in dbAdjTree 2025/08/27 18:11:12 patched crashed: no output from test machine [need repro = false] 2025/08/27 18:11:32 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 18:11:42 runner 0 connected 2025/08/27 18:12:01 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 18:12:09 runner 9 connected 2025/08/27 18:12:21 runner 4 connected 2025/08/27 18:12:50 runner 6 connected 2025/08/27 18:14:24 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/27 18:14:28 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11138: connect: connection refused 2025/08/27 18:14:28 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11138: connect: connection refused 2025/08/27 18:14:38 STAT { "buffer too small": 0, "candidate triage jobs": 16, "candidates": 35061, "comps overflows": 0, "corpus": 44156, "corpus [files]": 0, "corpus [symbols]": 23850, "cover overflows": 40068, "coverage": 310279, "distributor delayed": 43835, "distributor undelayed": 43835, "distributor violated": 110, "exec candidate": 44978, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 11, "exec seeds": 0, "exec smash": 0, "exec total [base]": 117055, "exec total [new]": 282837, "exec triage": 139650, "executor restarts [base]": 310, "executor restarts [new]": 808, "fault jobs": 0, "fuzzer jobs": 16, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 9, "hints jobs": 0, "max signal": 313685, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 44948, "no exec duration": 49100000000, "no exec requests": 352, "pending": 2, "prog exec time": 291, "reproducing": 0, "rpc recv": 10622008616, "rpc sent": 1674773872, "signal": 304689, "smash jobs": 0, "triage jobs": 0, "vm output": 35778720, "vm restarts [base]": 24, "vm restarts [new]": 84 } 2025/08/27 18:14:38 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:14:54 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:15:12 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:15:17 base crash "KASAN: slab-use-after-free Read in xfrm_state_find" is already known 2025/08/27 18:15:17 patched crashed: KASAN: slab-use-after-free Read in xfrm_state_find [need repro = false] 2025/08/27 18:15:21 runner 3 connected 2025/08/27 18:15:27 runner 9 connected 2025/08/27 18:15:44 runner 0 connected 2025/08/27 18:16:00 base crash: INFO: task hung in v9fs_evict_inode 2025/08/27 18:16:01 runner 5 connected 2025/08/27 18:16:06 runner 2 connected 2025/08/27 18:16:06 
patched crashed: INFO: task hung in v9fs_evict_inode [need repro = false] 2025/08/27 18:16:17 patched crashed: INFO: task hung in v9fs_evict_inode [need repro = false] 2025/08/27 18:16:19 patched crashed: INFO: task hung in v9fs_evict_inode [need repro = false] 2025/08/27 18:16:31 patched crashed: INFO: task hung in reg_check_chans_work [need repro = true] 2025/08/27 18:16:31 scheduled a reproduction of 'INFO: task hung in reg_check_chans_work' 2025/08/27 18:16:37 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 18:16:37 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:16:51 runner 2 connected 2025/08/27 18:17:05 runner 7 connected 2025/08/27 18:17:05 runner 8 connected 2025/08/27 18:17:07 runner 4 connected 2025/08/27 18:17:21 runner 1 connected 2025/08/27 18:17:25 runner 3 connected 2025/08/27 18:19:38 STAT { "buffer too small": 0, "candidate triage jobs": 6, "candidates": 29232, "comps overflows": 0, "corpus": 44909, "corpus [files]": 0, "corpus [symbols]": 24281, "cover overflows": 43662, "coverage": 311677, "distributor delayed": 44637, "distributor undelayed": 44637, "distributor violated": 111, "exec candidate": 50807, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 12, "exec seeds": 0, "exec smash": 0, "exec total [base]": 127890, "exec total [new]": 305126, "exec triage": 142146, "executor restarts [base]": 328, "executor restarts [new]": 873, "fault jobs": 0, "fuzzer jobs": 6, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 10, "hints jobs": 0, "max signal": 315138, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 45737, "no exec duration": 49100000000, "no exec requests": 352, "pending": 3, "prog exec time": 276, "reproducing": 0, "rpc recv": 11327385208, "rpc sent": 1829179504, "signal": 306085, "smash jobs": 0, "triage jobs": 0, "vm output": 38857390, "vm restarts [base]": 26, "vm restarts [new]": 93 } 2025/08/27 18:21:04 base crash "KASAN: slab-use-after-free Read in __xfrm_state_lookup" is already known 2025/08/27 18:21:04 patched crashed: KASAN: slab-use-after-free Read in __xfrm_state_lookup [need repro = false] 2025/08/27 18:21:15 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:22:01 runner 1 connected 2025/08/27 18:22:05 runner 8 connected 2025/08/27 18:22:43 base crash: KASAN: slab-use-after-free Read in xfrm_state_find 2025/08/27 18:22:50 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:23:38 triaged 91.1% of the corpus 2025/08/27 18:23:38 starting bug reproductions 2025/08/27 18:23:38 starting bug reproductions (max 10 VMs, 7 repros) 2025/08/27 18:23:38 start reproducing 'possible deadlock in __netdev_update_features' 2025/08/27 18:23:38 start reproducing 'INFO: task hung in reg_check_chans_work' 2025/08/27 18:23:40 runner 0 connected 2025/08/27 18:23:45 base crash: KASAN: slab-use-after-free Read in __xfrm_state_lookup 2025/08/27 18:23:47 runner 3 connected 2025/08/27 18:24:34 runner 1 connected 2025/08/27 18:24:38 STAT { "buffer too small": 0, "candidate triage jobs": 4, "candidates": 3233, "comps overflows": 0, "corpus": 45258, "corpus [files]": 0, "corpus [symbols]": 24485, "cover overflows": 48670, "coverage": 312280, "distributor delayed": 45042, 
"distributor undelayed": 45042, "distributor violated": 111, "exec candidate": 76806, "exec collide": 0, "exec fuzz": 0, "exec gen": 0, "exec hints": 0, "exec inject": 0, "exec minimize": 0, "exec retries": 13, "exec seeds": 0, "exec smash": 0, "exec total [base]": 139197, "exec total [new]": 332526, "exec triage": 143529, "executor restarts [base]": 355, "executor restarts [new]": 932, "fault jobs": 0, "fuzzer jobs": 4, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 7, "hints jobs": 0, "max signal": 315930, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 0, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46144, "no exec duration": 49107000000, "no exec requests": 354, "pending": 1, "prog exec time": 376, "reproducing": 2, "rpc recv": 11748528872, "rpc sent": 1966645496, "signal": 306688, "smash jobs": 0, "triage jobs": 0, "vm output": 42065809, "vm restarts [base]": 28, "vm restarts [new]": 96 } 2025/08/27 18:24:51 base crash "WARNING in xfrm_state_fini" is already known 2025/08/27 18:24:51 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:25:40 runner 5 connected 2025/08/27 18:25:42 base crash: WARNING in udf_setsize 2025/08/27 18:26:33 runner 0 connected 2025/08/27 18:26:41 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:27:13 base crash: WARNING in xfrm_state_fini 2025/08/27 18:27:30 runner 5 connected 2025/08/27 18:28:02 runner 2 connected 2025/08/27 18:28:04 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 18:28:50 patched crashed: general protection fault in device_move [need repro = false] 2025/08/27 18:28:52 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:28:53 runner 3 connected 2025/08/27 18:28:54 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 18:29:12 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:29:26 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:29:38 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 33, "corpus": 45344, "corpus [files]": 0, "corpus [symbols]": 24530, "cover overflows": 51094, "coverage": 312756, "distributor delayed": 45279, "distributor undelayed": 45279, "distributor violated": 111, "exec candidate": 80039, "exec collide": 922, "exec fuzz": 1853, "exec gen": 74, "exec hints": 709, "exec inject": 0, "exec minimize": 1373, "exec retries": 14, "exec seeds": 151, "exec smash": 1146, "exec total [base]": 145558, "exec total [new]": 342473, "exec triage": 144012, "executor restarts [base]": 386, "executor restarts [new]": 985, "fault jobs": 0, "fuzzer jobs": 43, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 4, "hints jobs": 12, "max signal": 317117, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 746, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46303, "no exec duration": 49741000000, "no exec requests": 357, "pending": 1, "prog exec time": 805, "reproducing": 2, "rpc recv": 12203247396, "rpc sent": 2147086168, "signal": 306954, "smash jobs": 21, "triage jobs": 10, "vm output": 46010144, "vm restarts [base]": 30, "vm restarts [new]": 99 } 2025/08/27 18:29:39 runner 4 connected 2025/08/27 
18:29:40 runner 3 connected 2025/08/27 18:29:43 runner 0 connected 2025/08/27 18:29:46 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:29:51 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:30:00 runner 1 connected 2025/08/27 18:30:15 runner 7 connected 2025/08/27 18:30:36 runner 6 connected 2025/08/27 18:30:41 runner 8 connected 2025/08/27 18:31:18 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:31:34 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:32:02 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:32:15 runner 7 connected 2025/08/27 18:32:24 runner 6 connected 2025/08/27 18:32:51 runner 9 connected 2025/08/27 18:32:56 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:32:57 base crash "possible deadlock in run_unpack_ex" is already known 2025/08/27 18:32:57 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/08/27 18:32:59 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:33:19 base crash "possible deadlock in run_unpack_ex" is already known 2025/08/27 18:33:19 patched crashed: possible deadlock in run_unpack_ex [need repro = false] 2025/08/27 18:33:20 base crash: possible deadlock in run_unpack_ex 2025/08/27 18:33:36 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:33:46 runner 3 connected 2025/08/27 18:33:48 runner 0 connected 2025/08/27 18:33:48 runner 6 connected 2025/08/27 18:34:08 runner 5 connected 2025/08/27 18:34:10 runner 1 connected 2025/08/27 18:34:25 runner 4 connected 2025/08/27 18:34:38 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 99, "corpus": 45413, "corpus [files]": 0, "corpus [symbols]": 24565, "cover overflows": 52820, "coverage": 312926, "distributor delayed": 45484, "distributor undelayed": 45484, "distributor violated": 111, "exec candidate": 80039, "exec collide": 1539, "exec fuzz": 3103, "exec gen": 149, "exec hints": 1296, "exec inject": 0, "exec minimize": 2698, "exec retries": 14, "exec seeds": 348, "exec smash": 2301, "exec total [base]": 148893, "exec total [new]": 348034, "exec triage": 144355, "executor restarts [base]": 438, "executor restarts [new]": 1042, "fault jobs": 0, "fuzzer jobs": 88, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 5, "hints jobs": 22, "max signal": 317466, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 1528, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46427, "no exec duration": 53289000000, "no exec requests": 362, "pending": 1, "prog exec time": 731, "reproducing": 2, "rpc recv": 12963538540, "rpc sent": 2337885336, "signal": 307093, "smash jobs": 53, "triage jobs": 13, "vm output": 50166504, "vm restarts [base]": 36, "vm restarts [new]": 109 } 2025/08/27 18:34:38 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:34:39 base crash "kernel BUG in ocfs2_write_cluster_by_desc" is already known 2025/08/27 18:34:39 patched crashed: kernel BUG in ocfs2_write_cluster_by_desc [need repro = false] 2025/08/27 18:35:27 runner 8 connected 2025/08/27 18:35:28 runner 5 connected 2025/08/27 18:35:31 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need 
repro = false] 2025/08/27 18:35:34 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:36:03 repro finished 'possible deadlock in __netdev_update_features', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/08/27 18:36:03 failed repro for "possible deadlock in __netdev_update_features", err=%!s() 2025/08/27 18:36:03 "possible deadlock in __netdev_update_features": saved crash log into 1756319763.crash.log 2025/08/27 18:36:03 "possible deadlock in __netdev_update_features": saved repro log into 1756319763.repro.log 2025/08/27 18:36:20 runner 7 connected 2025/08/27 18:36:23 runner 3 connected 2025/08/27 18:36:24 runner 0 connected 2025/08/27 18:37:03 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:37:26 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:37:32 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:37:55 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:37:59 runner 3 connected 2025/08/27 18:38:16 runner 1 connected 2025/08/27 18:38:20 runner 9 connected 2025/08/27 18:38:43 runner 8 connected 2025/08/27 18:38:53 base crash "kernel BUG in ocfs2_set_new_buffer_uptodate" is already known 2025/08/27 18:38:53 patched crashed: kernel BUG in ocfs2_set_new_buffer_uptodate [need repro = false] 2025/08/27 18:39:20 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 18:39:28 base crash: kernel BUG in jfs_evict_inode 2025/08/27 18:39:38 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 147, "corpus": 45479, "corpus [files]": 0, "corpus [symbols]": 24595, "cover overflows": 55627, "coverage": 313073, "distributor delayed": 45664, "distributor undelayed": 45664, "distributor violated": 111, "exec candidate": 80039, "exec collide": 2559, "exec fuzz": 5068, "exec gen": 247, "exec hints": 2256, "exec inject": 0, "exec minimize": 4421, "exec retries": 14, "exec seeds": 513, "exec smash": 4267, "exec total [base]": 153299, "exec total [new]": 356320, "exec triage": 144740, "executor restarts [base]": 465, "executor restarts [new]": 1111, "fault jobs": 0, "fuzzer jobs": 54, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 5, "hints jobs": 18, "max signal": 317943, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 2529, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46563, "no exec duration": 58041000000, "no exec requests": 368, "pending": 1, "prog exec time": 636, "reproducing": 1, "rpc recv": 13580521488, "rpc sent": 2581509992, "signal": 307206, "smash jobs": 25, "triage jobs": 11, "vm output": 54132211, "vm restarts [base]": 38, "vm restarts [new]": 116 } 2025/08/27 18:39:38 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:39:41 runner 5 connected 2025/08/27 18:39:57 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:40:04 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:40:09 runner 6 connected 2025/08/27 18:40:17 runner 2 connected 2025/08/27 18:40:26 base crash: lost connection to test machine 2025/08/27 18:40:27 runner 4 connected 2025/08/27 18:40:46 runner 7 connected 2025/08/27 18:40:52 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 
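The periodic STAT entries above are single-line JSON objects, and the crash events follow fixed prefixes ("base crash: ...", "patched crashed: ... [need repro = ...]"), so a long run like this can be skimmed offline. The sketch below is a hypothetical helper, not part of syzkaller: it assumes the raw log is fed on stdin with one event per line (a re-wrapped copy such as this extract would need to be unwrapped first), and it only reads a handful of STAT fields plus the crash titles; all names in it (logscan.go, the stat struct) are mine.

// logscan.go — assumed helper for skimming a syz-diff style log.
// Prints corpus/coverage progress from each STAT line and tallies
// crash titles seen on the base and patched kernels.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// stat picks out only the fields we care about; all other STAT keys are ignored.
type stat struct {
	Corpus        int `json:"corpus"`
	Coverage      int `json:"coverage"`
	Candidates    int `json:"candidates"`
	ExecTotalBase int `json:"exec total [base]"`
	ExecTotalNew  int `json:"exec total [new]"`
}

func main() {
	crashes := map[string]map[string]int{"base": {}, "patched": {}}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1<<20), 1<<20) // STAT lines are long
	for sc.Scan() {
		line := sc.Text()
		switch {
		case strings.Contains(line, " STAT {"):
			// The JSON blob starts right after the "STAT " marker.
			js := line[strings.Index(line, " STAT {")+len(" STAT "):]
			var s stat
			if json.Unmarshal([]byte(js), &s) == nil {
				fmt.Printf("corpus=%d coverage=%d candidates=%d exec base/new=%d/%d\n",
					s.Corpus, s.Coverage, s.Candidates, s.ExecTotalBase, s.ExecTotalNew)
			}
		case strings.Contains(line, "patched crashed: "):
			title := line[strings.Index(line, "patched crashed: ")+len("patched crashed: "):]
			if i := strings.Index(title, " [need repro"); i >= 0 {
				title = title[:i]
			}
			crashes["patched"][title]++
		case strings.Contains(line, "base crash: "):
			title := line[strings.Index(line, "base crash: ")+len("base crash: "):]
			crashes["base"][title]++
		}
	}
	for side, m := range crashes {
		for title, n := range m {
			fmt.Printf("%-7s %4d %s\n", side, n, title)
		}
	}
}

Usage, assuming the unwrapped log was saved as fuzz.log: go run logscan.go < fuzz.log. Note that the 'base crash "..." is already known' lines are deliberately skipped, since each is paired with a "patched crashed:" line that is already counted.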
2025/08/27 18:40:53 runner 3 connected 2025/08/27 18:41:14 runner 0 connected 2025/08/27 18:41:42 runner 6 connected 2025/08/27 18:41:42 base crash "possible deadlock in ocfs2_reserve_suballoc_bits" is already known 2025/08/27 18:41:42 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/27 18:42:12 base crash: possible deadlock in ocfs2_reserve_suballoc_bits 2025/08/27 18:42:24 base crash: kernel BUG in ocfs2_set_new_buffer_uptodate 2025/08/27 18:42:29 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 18:42:31 runner 9 connected 2025/08/27 18:43:01 runner 3 connected 2025/08/27 18:43:12 runner 2 connected 2025/08/27 18:43:20 runner 1 connected 2025/08/27 18:44:38 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 214, "corpus": 45541, "corpus [files]": 0, "corpus [symbols]": 24626, "cover overflows": 58382, "coverage": 313219, "distributor delayed": 45824, "distributor undelayed": 45824, "distributor violated": 111, "exec candidate": 80039, "exec collide": 3798, "exec fuzz": 7438, "exec gen": 365, "exec hints": 4168, "exec inject": 0, "exec minimize": 5714, "exec retries": 14, "exec seeds": 692, "exec smash": 5883, "exec total [base]": 157074, "exec total [new]": 365387, "exec triage": 145074, "executor restarts [base]": 509, "executor restarts [new]": 1183, "fault jobs": 0, "fuzzer jobs": 42, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 8, "hints jobs": 21, "max signal": 318336, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 3363, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46680, "no exec duration": 64629000000, "no exec requests": 379, "pending": 1, "prog exec time": 499, "reproducing": 1, "rpc recv": 14254421368, "rpc sent": 2819132664, "signal": 307348, "smash jobs": 13, "triage jobs": 8, "vm output": 58844015, "vm restarts [base]": 43, "vm restarts [new]": 123 } 2025/08/27 18:45:11 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 18:45:29 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:46:02 runner 0 connected 2025/08/27 18:46:17 runner 4 connected 2025/08/27 18:46:20 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 18:46:25 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 18:46:44 base crash: possible deadlock in run_unpack_ex 2025/08/27 18:47:10 runner 6 connected 2025/08/27 18:47:14 runner 8 connected 2025/08/27 18:47:34 runner 2 connected 2025/08/27 18:48:48 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:48:54 base crash: WARNING in dbAdjTree 2025/08/27 18:48:56 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:49:00 base crash: INFO: task hung in __closure_sync 2025/08/27 18:49:37 runner 7 connected 2025/08/27 18:49:38 STAT { "buffer too small": 0, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 283, "corpus": 45595, "corpus [files]": 0, "corpus [symbols]": 24652, "cover overflows": 61609, "coverage": 313353, "distributor delayed": 45977, "distributor undelayed": 45977, "distributor violated": 111, "exec candidate": 80039, "exec collide": 5493, "exec fuzz": 10614, "exec gen": 510, "exec hints": 6728, "exec inject": 0, "exec minimize": 7120, "exec retries": 15, "exec seeds": 841, "exec smash": 7207, 
"exec total [base]": 161107, "exec total [new]": 376185, "exec triage": 145422, "executor restarts [base]": 544, "executor restarts [new]": 1231, "fault jobs": 0, "fuzzer jobs": 23, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 6, "hints jobs": 14, "max signal": 318609, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4064, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46796, "no exec duration": 67109000000, "no exec requests": 386, "pending": 1, "prog exec time": 552, "reproducing": 1, "rpc recv": 14724510832, "rpc sent": 3102891776, "signal": 307451, "smash jobs": 7, "triage jobs": 2, "vm output": 63477706, "vm restarts [base]": 45, "vm restarts [new]": 127 } 2025/08/27 18:49:43 runner 0 connected 2025/08/27 18:49:46 runner 9 connected 2025/08/27 18:49:49 runner 1 connected 2025/08/27 18:50:15 patched crashed: general protection fault in pcl818_ai_cancel [need repro = false] 2025/08/27 18:50:17 base crash: lost connection to test machine 2025/08/27 18:51:04 runner 4 connected 2025/08/27 18:51:06 runner 2 connected 2025/08/27 18:51:09 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 18:51:13 base crash: KASAN: slab-use-after-free Read in __usb_hcd_giveback_urb 2025/08/27 18:51:57 runner 0 connected 2025/08/27 18:52:02 runner 1 connected 2025/08/27 18:53:18 base crash: WARNING in xfrm_state_fini 2025/08/27 18:53:20 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 18:54:09 runner 6 connected 2025/08/27 18:54:14 runner 0 connected 2025/08/27 18:54:20 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:54:38 STAT { "buffer too small": 1, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 304, "corpus": 45626, "corpus [files]": 0, "corpus [symbols]": 24663, "cover overflows": 66385, "coverage": 313414, "distributor delayed": 46126, "distributor undelayed": 46126, "distributor violated": 111, "exec candidate": 80039, "exec collide": 8901, "exec fuzz": 16880, "exec gen": 868, "exec hints": 9929, "exec inject": 0, "exec minimize": 8004, "exec retries": 15, "exec seeds": 928, "exec smash": 7980, "exec total [base]": 166594, "exec total [new]": 391535, "exec triage": 145791, "executor restarts [base]": 575, "executor restarts [new]": 1279, "fault jobs": 0, "fuzzer jobs": 24, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 7, "hints jobs": 12, "max signal": 318932, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4532, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46921, "no exec duration": 80980000000, "no exec requests": 422, "pending": 1, "prog exec time": 466, "reproducing": 1, "rpc recv": 15310981304, "rpc sent": 3502572688, "signal": 307496, "smash jobs": 8, "triage jobs": 4, "vm output": 66634576, "vm restarts [base]": 51, "vm restarts [new]": 130 } 2025/08/27 18:55:04 base crash: KASAN: slab-use-after-free Read in xfrm_state_find 2025/08/27 18:55:09 runner 8 connected 2025/08/27 18:55:49 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:55:52 runner 3 connected 2025/08/27 18:56:04 base crash "INFO: task hung in read_part_sector" is already known 2025/08/27 18:56:04 patched crashed: INFO: task hung in read_part_sector [need repro = false] 2025/08/27 18:56:37 patched crashed: 
WARNING in xfrm_state_fini [need repro = false] 2025/08/27 18:56:39 runner 7 connected 2025/08/27 18:56:43 base crash: kernel BUG in jfs_evict_inode 2025/08/27 18:56:52 runner 0 connected 2025/08/27 18:57:05 patched crashed: KASAN: slab-use-after-free Read in xfrm_alloc_spi [need repro = false] 2025/08/27 18:57:26 runner 3 connected 2025/08/27 18:57:28 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 18:57:32 runner 0 connected 2025/08/27 18:57:45 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 18:57:52 runner 4 connected 2025/08/27 18:57:54 base crash: lost connection to test machine 2025/08/27 18:58:16 patched crashed: possible deadlock in ocfs2_reserve_suballoc_bits [need repro = false] 2025/08/27 18:58:17 runner 6 connected 2025/08/27 18:58:35 runner 0 connected 2025/08/27 18:58:42 runner 1 connected 2025/08/27 18:58:49 patched crashed: KASAN: slab-use-after-free Read in __xfrm_state_lookup [need repro = false] 2025/08/27 18:59:05 runner 4 connected 2025/08/27 18:59:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 318, "corpus": 45643, "corpus [files]": 0, "corpus [symbols]": 24672, "cover overflows": 68917, "coverage": 313468, "distributor delayed": 46240, "distributor undelayed": 46240, "distributor violated": 111, "exec candidate": 80039, "exec collide": 11091, "exec fuzz": 20982, "exec gen": 1090, "exec hints": 11712, "exec inject": 0, "exec minimize": 8343, "exec retries": 16, "exec seeds": 980, "exec smash": 8391, "exec total [base]": 173383, "exec total [new]": 400868, "exec triage": 146024, "executor restarts [base]": 608, "executor restarts [new]": 1373, "fault jobs": 0, "fuzzer jobs": 15, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 6, "hints jobs": 6, "max signal": 319087, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 4738, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 46995, "no exec duration": 89388000000, "no exec requests": 446, "pending": 1, "prog exec time": 661, "reproducing": 1, "rpc recv": 15937392908, "rpc sent": 3814426512, "signal": 307520, "smash jobs": 4, "triage jobs": 5, "vm output": 71248393, "vm restarts [base]": 54, "vm restarts [new]": 138 } 2025/08/27 18:59:38 runner 9 connected 2025/08/27 18:59:46 patched crashed: INFO: rcu detected stall in do_idle [need repro = false] 2025/08/27 19:00:10 patched crashed: KASAN: slab-out-of-bounds Read in dtSplitPage [need repro = true] 2025/08/27 19:00:10 scheduled a reproduction of 'KASAN: slab-out-of-bounds Read in dtSplitPage' 2025/08/27 19:00:10 start reproducing 'KASAN: slab-out-of-bounds Read in dtSplitPage' 2025/08/27 19:00:18 base crash: lost connection to test machine 2025/08/27 19:00:23 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:00:35 runner 5 connected 2025/08/27 19:00:48 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:01:08 runner 1 connected 2025/08/27 19:01:12 runner 8 connected 2025/08/27 19:01:12 base crash: KASAN: slab-use-after-free Read in xfrm_alloc_spi 2025/08/27 19:01:39 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize 
report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:02:02 runner 2 connected 2025/08/27 19:02:10 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:02:12 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:02:19 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:35736: connect: connection refused 2025/08/27 19:02:19 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:35736: connect: connection refused 2025/08/27 19:02:29 base crash: lost connection to test machine 2025/08/27 19:02:57 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:02:59 runner 4 connected 2025/08/27 19:03:06 patched crashed: INFO: task hung in __closure_sync [need repro = false] 2025/08/27 19:03:12 base crash: WARNING in xfrm_state_fini 2025/08/27 19:03:18 runner 1 connected 2025/08/27 19:03:26 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 19:03:30 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 19:03:55 runner 3 connected 2025/08/27 19:04:01 runner 0 connected 2025/08/27 19:04:16 runner 6 connected 2025/08/27 19:04:19 runner 9 connected 2025/08/27 19:04:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 358, "corpus": 45681, "corpus [files]": 0, "corpus [symbols]": 24697, "cover overflows": 70631, "coverage": 313750, "distributor delayed": 46348, "distributor undelayed": 46348, "distributor violated": 111, "exec candidate": 80039, "exec collide": 12253, "exec fuzz": 23259, "exec gen": 1201, "exec hints": 12480, "exec inject": 0, "exec minimize": 9301, "exec retries": 16, "exec seeds": 1088, "exec smash": 9124, "exec total [base]": 177604, "exec total [new]": 407223, "exec triage": 146255, "executor restarts [base]": 661, "executor restarts [new]": 1460, "fault jobs": 0, "fuzzer jobs": 33, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 7, "hints jobs": 10, "max signal": 319488, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 5305, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47082, "no exec duration": 95244000000, "no exec requests": 455, "pending": 1, "prog exec time": 898, "reproducing": 2, "rpc recv": 16530095488, "rpc sent": 4408124752, "signal": 307597, "smash jobs": 12, "triage jobs": 11, "vm output": 76505020, "vm restarts [base]": 58, "vm restarts [new]": 145 } 2025/08/27 19:05:12 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:05:14 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:05:22 base 
crash "INFO: task hung in sync_bdevs" is already known 2025/08/27 19:05:22 patched crashed: INFO: task hung in sync_bdevs [need repro = false] 2025/08/27 19:05:30 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 19:05:38 base crash: WARNING in xfrm_state_fini 2025/08/27 19:05:48 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 19:06:01 runner 5 connected 2025/08/27 19:06:04 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:06:12 runner 7 connected 2025/08/27 19:06:21 runner 3 connected 2025/08/27 19:06:27 runner 3 connected 2025/08/27 19:06:37 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:06:37 runner 2 connected 2025/08/27 19:06:58 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:07:10 base crash: possible deadlock in run_unpack_ex 2025/08/27 19:07:24 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:07:31 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:07:47 runner 9 connected 2025/08/27 19:07:57 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:07:59 runner 1 connected 2025/08/27 19:08:10 base crash: WARNING in __linkwatch_sync_dev 2025/08/27 19:08:20 runner 5 connected 2025/08/27 19:08:31 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:08:41 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:08:59 runner 0 connected 2025/08/27 19:09:15 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:09:20 runner 8 connected 2025/08/27 19:09:21 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:09:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 410, "corpus": 45722, "corpus [files]": 0, "corpus [symbols]": 24729, "cover overflows": 72168, "coverage": 313818, "distributor delayed": 46454, "distributor undelayed": 46454, "distributor violated": 111, "exec candidate": 80039, "exec collide": 12925, "exec fuzz": 24535, "exec gen": 1263, "exec hints": 13369, "exec inject": 0, "exec minimize": 10414, "exec retries": 16, "exec seeds": 1194, "exec smash": 10005, "exec total [base]": 
181103, "exec total [new]": 412448, "exec triage": 146477, "executor restarts [base]": 699, "executor restarts [new]": 1552, "fault jobs": 0, "fuzzer jobs": 39, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 7, "hints jobs": 16, "max signal": 319685, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 6108, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47168, "no exec duration": 101244000000, "no exec requests": 461, "pending": 1, "prog exec time": 910, "reproducing": 2, "rpc recv": 17118475196, "rpc sent": 4872781152, "signal": 307659, "smash jobs": 14, "triage jobs": 9, "vm output": 81204079, "vm restarts [base]": 62, "vm restarts [new]": 151 } 2025/08/27 19:10:06 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 19:10:08 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:10:10 runner 3 connected 2025/08/27 19:10:28 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:10:31 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:10:33 base crash: WARNING in dbAdjTree 2025/08/27 19:10:36 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:10:47 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:10:55 runner 6 connected 2025/08/27 19:11:16 runner 5 connected 2025/08/27 19:11:19 runner 4 connected 2025/08/27 19:11:22 runner 3 connected 2025/08/27 19:11:29 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:11:36 runner 1 connected 2025/08/27 19:12:46 base crash "possible deadlock in mark_as_free_ex" is already known 2025/08/27 19:12:46 patched crashed: possible deadlock in mark_as_free_ex [need repro = false] 2025/08/27 19:12:50 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:13:34 runner 7 connected 2025/08/27 19:13:37 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:14:33 runner 5 connected 2025/08/27 19:14:34 patched crashed: KASAN: slab-use-after-free Read in __xfrm_state_lookup [need repro = false] 2025/08/27 19:14:37 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:14:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 481, "corpus": 45763, "corpus [files]": 0, "corpus [symbols]": 24753, "cover overflows": 74546, "coverage": 313886, "distributor delayed": 46568, "distributor undelayed": 46568, "distributor violated": 111, "exec candidate": 80039, "exec 
collide": 13918, "exec fuzz": 26333, "exec gen": 1362, "exec hints": 15058, "exec inject": 0, "exec minimize": 11647, "exec retries": 16, "exec seeds": 1288, "exec smash": 11027, "exec total [base]": 185251, "exec total [new]": 419602, "exec triage": 146711, "executor restarts [base]": 730, "executor restarts [new]": 1588, "fault jobs": 0, "fuzzer jobs": 28, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 3, "hints jobs": 14, "max signal": 319889, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 6770, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47243, "no exec duration": 109760000000, "no exec requests": 470, "pending": 1, "prog exec time": 524, "reproducing": 2, "rpc recv": 17620030728, "rpc sent": 5296674200, "signal": 307716, "smash jobs": 5, "triage jobs": 9, "vm output": 87972052, "vm restarts [base]": 65, "vm restarts [new]": 156 } 2025/08/27 19:14:50 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:15:16 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 19:15:22 runner 6 connected 2025/08/27 19:15:26 runner 9 connected 2025/08/27 19:15:39 base crash: WARNING in xfrm_state_fini 2025/08/27 19:15:40 runner 8 connected 2025/08/27 19:15:51 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:16:05 runner 3 connected 2025/08/27 19:16:13 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 19:16:18 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:16:22 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:16:28 runner 0 connected 2025/08/27 19:16:43 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 19:17:01 runner 6 connected 2025/08/27 19:17:10 runner 5 connected 2025/08/27 19:17:10 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:17:28 base crash: WARNING in xfrm_state_fini 2025/08/27 19:17:40 runner 7 connected 2025/08/27 19:17:41 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:17:41 patched crashed: WARNING in bch2_trans_srcu_unlock [need repro = true] 2025/08/27 19:17:41 scheduled a reproduction of 'WARNING in bch2_trans_srcu_unlock' 2025/08/27 19:17:41 start reproducing 'WARNING in bch2_trans_srcu_unlock' 2025/08/27 19:18:05 base crash "kernel BUG in may_open" is already known 2025/08/27 19:18:05 patched crashed: kernel BUG in may_open [need repro = false] 2025/08/27 19:18:17 runner 0 connected 2025/08/27 19:18:30 runner 4 connected 2025/08/27 19:18:34 reproducing crash 'KASAN: 
slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:18:57 base crash: lost connection to test machine 2025/08/27 19:19:01 runner 8 connected 2025/08/27 19:19:03 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:19:06 base crash: lost connection to test machine 2025/08/27 19:19:24 base crash: general protection fault in pcl818_ai_cancel 2025/08/27 19:19:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 532, "corpus": 45798, "corpus [files]": 0, "corpus [symbols]": 24772, "cover overflows": 76529, "coverage": 313976, "distributor delayed": 46660, "distributor undelayed": 46660, "distributor violated": 111, "exec candidate": 80039, "exec collide": 14604, "exec fuzz": 27655, "exec gen": 1435, "exec hints": 16333, "exec inject": 0, "exec minimize": 12396, "exec retries": 17, "exec seeds": 1365, "exec smash": 11658, "exec total [base]": 189828, "exec total [new]": 424585, "exec triage": 146868, "executor restarts [base]": 754, "executor restarts [new]": 1652, "fault jobs": 0, "fuzzer jobs": 36, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 6, "hints jobs": 17, "max signal": 320005, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 7211, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47302, "no exec duration": 115760000000, "no exec requests": 476, "pending": 1, "prog exec time": 947, "reproducing": 3, "rpc recv": 18275283944, "rpc sent": 5587262888, "signal": 307800, "smash jobs": 12, "triage jobs": 7, "vm output": 92590550, "vm restarts [base]": 67, "vm restarts [new]": 165 } 2025/08/27 19:19:47 runner 3 connected 2025/08/27 19:19:53 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:19:55 runner 0 connected 2025/08/27 19:20:12 runner 1 connected 2025/08/27 19:20:14 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 19:20:24 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:20:49 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:20:54 patched crashed: WARNING in dbAdjTree [need repro = false] 2025/08/27 19:21:02 base crash: kernel BUG in may_open 2025/08/27 19:21:03 runner 9 connected 2025/08/27 19:21:16 reproducing crash 'KASAN: slab-out-of-bounds Read in dtSplitPage': failed to symbolize report: failed to start scripts/get_maintainer.pl [scripts/get_maintainer.pl --git-min-percent=15 -f fs/jfs/jfs_dtree.c]: fork/exec scripts/get_maintainer.pl: no such file or directory 2025/08/27 19:21:16 repro finished 'KASAN: 
slab-out-of-bounds Read in dtSplitPage', repro=true crepro=false desc='KASAN: slab-out-of-bounds Read in dtSplitPage' hub=false from_dashboard=false 2025/08/27 19:21:16 found repro for "KASAN: slab-out-of-bounds Read in dtSplitPage" (orig title: "-SAME-", reliability: 1), took 21.02 minutes 2025/08/27 19:21:16 "KASAN: slab-out-of-bounds Read in dtSplitPage": saved crash log into 1756322476.crash.log 2025/08/27 19:21:16 "KASAN: slab-out-of-bounds Read in dtSplitPage": saved repro log into 1756322476.repro.log 2025/08/27 19:21:29 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/27 19:21:38 runner 4 connected 2025/08/27 19:21:43 runner 5 connected 2025/08/27 19:22:18 runner 2 connected 2025/08/27 19:22:21 attempt #0 to run "KASAN: slab-out-of-bounds Read in dtSplitPage" on base: aborting due to context cancelation 2025/08/27 19:22:37 runner 0 connected 2025/08/27 19:23:06 base crash: possible deadlock in ocfs2_setattr 2025/08/27 19:23:10 runner 0 connected 2025/08/27 19:23:55 runner 2 connected 2025/08/27 19:24:09 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:24:21 patched crashed: INFO: task hung in v9fs_evict_inode [need repro = false] 2025/08/27 19:24:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 560, "corpus": 45826, "corpus [files]": 0, "corpus [symbols]": 24792, "cover overflows": 78276, "coverage": 314223, "distributor delayed": 46761, "distributor undelayed": 46761, "distributor violated": 111, "exec candidate": 80039, "exec collide": 15666, "exec fuzz": 29553, "exec gen": 1547, "exec hints": 17593, "exec inject": 0, "exec minimize": 13065, "exec retries": 18, "exec seeds": 1424, "exec smash": 12361, "exec total [base]": 193035, "exec total [new]": 430515, "exec triage": 147034, "executor restarts [base]": 784, "executor restarts [new]": 1707, "fault jobs": 0, "fuzzer jobs": 28, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 4, "hints jobs": 12, "max signal": 320406, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 7590, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47361, "no exec duration": 118737000000, "no exec requests": 482, "pending": 1, "prog exec time": 756, "reproducing": 2, "rpc recv": 18815305336, "rpc sent": 5849302576, "signal": 308003, "smash jobs": 7, "triage jobs": 9, "vm output": 98320839, "vm restarts [base]": 73, "vm restarts [new]": 169 } 2025/08/27 19:24:42 patched crashed: kernel BUG in may_open [need repro = false] 2025/08/27 19:24:43 base crash: general protection fault in pcl818_ai_cancel 2025/08/27 19:24:58 runner 9 connected 2025/08/27 19:25:09 runner 8 connected 2025/08/27 19:25:23 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:25:30 runner 0 connected 2025/08/27 19:25:31 runner 0 connected 2025/08/27 19:25:35 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:26:12 runner 4 connected 2025/08/27 19:26:25 runner 6 connected 2025/08/27 19:26:59 base crash: kernel BUG in may_open 2025/08/27 19:26:59 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:27:33 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:27:47 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:27:47 runner 2 
connected 2025/08/27 19:27:48 runner 4 connected 2025/08/27 19:28:00 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:28:17 base crash: lost connection to test machine 2025/08/27 19:28:23 runner 5 connected 2025/08/27 19:28:36 runner 7 connected 2025/08/27 19:28:38 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:28:47 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:28:49 runner 6 connected 2025/08/27 19:29:06 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:29:06 runner 0 connected 2025/08/27 19:29:19 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:29:35 runner 9 connected 2025/08/27 19:29:38 runner 4 connected 2025/08/27 19:29:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 588, "corpus": 45850, "corpus [files]": 0, "corpus [symbols]": 24808, "cover overflows": 79629, "coverage": 314307, "distributor delayed": 46845, "distributor undelayed": 46843, "distributor violated": 111, "exec candidate": 80039, "exec collide": 16294, "exec fuzz": 30711, "exec gen": 1612, "exec hints": 18257, "exec inject": 0, "exec minimize": 13794, "exec retries": 18, "exec seeds": 1474, "exec smash": 12834, "exec total [base]": 196666, "exec total [new]": 434422, "exec triage": 147173, "executor restarts [base]": 813, "executor restarts [new]": 1757, "fault jobs": 0, "fuzzer jobs": 31, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 3, "hints jobs": 8, "max signal": 320543, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 7998, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47411, "no exec duration": 118737000000, "no exec requests": 482, "pending": 1, "prog exec time": 1394, "reproducing": 2, "rpc recv": 19443360384, "rpc sent": 6094827688, "signal": 308077, "smash jobs": 12, "triage jobs": 11, "vm output": 102150553, "vm restarts [base]": 76, "vm restarts [new]": 180 } 2025/08/27 19:29:54 runner 8 connected 2025/08/27 19:30:07 runner 6 connected 2025/08/27 19:30:10 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:30:32 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:30:46 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:30:59 runner 3 connected 2025/08/27 19:31:08 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:31:17 base crash "possible deadlock in ocfs2_calc_xattr_init" is already known 2025/08/27 19:31:17 patched crashed: possible deadlock in ocfs2_calc_xattr_init [need repro = false] 2025/08/27 19:31:23 runner 7 connected 2025/08/27 19:31:23 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:31:27 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:31:36 runner 1 connected 2025/08/27 19:31:57 runner 0 connected 2025/08/27 19:32:06 runner 0 connected 2025/08/27 19:32:14 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:32:15 runner 2 connected 2025/08/27 19:32:19 runner 9 connected 2025/08/27 19:32:21 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:32:30 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:32:50 base crash: possible deadlock in 
ocfs2_try_remove_refcount_tree 2025/08/27 19:32:54 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:33:04 base crash: lost connection to test machine 2025/08/27 19:33:05 runner 6 connected 2025/08/27 19:33:10 runner 8 connected 2025/08/27 19:33:18 runner 1 connected 2025/08/27 19:33:20 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:33:39 runner 2 connected 2025/08/27 19:33:42 runner 4 connected 2025/08/27 19:33:53 runner 3 connected 2025/08/27 19:34:16 runner 5 connected 2025/08/27 19:34:21 patched crashed: kernel BUG in ocfs2_set_new_buffer_uptodate [need repro = false] 2025/08/27 19:34:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 652, "corpus": 45881, "corpus [files]": 0, "corpus [symbols]": 24832, "cover overflows": 81274, "coverage": 314402, "distributor delayed": 46910, "distributor undelayed": 46910, "distributor violated": 111, "exec candidate": 80039, "exec collide": 16939, "exec fuzz": 31815, "exec gen": 1683, "exec hints": 19160, "exec inject": 0, "exec minimize": 14533, "exec retries": 18, "exec seeds": 1569, "exec smash": 13507, "exec total [base]": 198794, "exec total [new]": 438805, "exec triage": 147314, "executor restarts [base]": 841, "executor restarts [new]": 1813, "fault jobs": 0, "fuzzer jobs": 48, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 4, "hints jobs": 22, "max signal": 320702, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 8445, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47467, "no exec duration": 118765000000, "no exec requests": 483, "pending": 1, "prog exec time": 838, "reproducing": 2, "rpc recv": 20215037916, "rpc sent": 6330048464, "signal": 308146, "smash jobs": 16, "triage jobs": 10, "vm output": 106576719, "vm restarts [base]": 83, "vm restarts [new]": 189 } 2025/08/27 19:34:39 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:34:47 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:34:54 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:34:59 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:35:10 runner 6 connected 2025/08/27 19:35:27 runner 7 connected 2025/08/27 19:35:35 runner 8 connected 2025/08/27 19:35:40 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:35:42 runner 3 connected 2025/08/27 19:35:48 runner 4 connected 2025/08/27 19:35:54 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:35:55 base crash "possible deadlock in ocfs2_calc_xattr_init" is already known 2025/08/27 19:35:55 patched crashed: possible deadlock in ocfs2_calc_xattr_init [need repro = false] 2025/08/27 19:36:04 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 19:36:29 runner 6 connected 2025/08/27 19:36:31 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:36:45 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 19:36:45 runner 7 connected 2025/08/27 19:36:45 runner 9 connected 2025/08/27 19:36:53 runner 0 connected 2025/08/27 19:37:21 runner 2 connected 2025/08/27 19:37:34 runner 5 connected 2025/08/27 19:37:40 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 19:37:45 base crash: WARNING in 
dbAdjTree 2025/08/27 19:38:32 runner 0 connected 2025/08/27 19:38:34 runner 3 connected 2025/08/27 19:38:58 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:39:16 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:39:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 743, "corpus": 45924, "corpus [files]": 0, "corpus [symbols]": 24859, "cover overflows": 83732, "coverage": 314525, "distributor delayed": 47013, "distributor undelayed": 47013, "distributor violated": 111, "exec candidate": 80039, "exec collide": 17644, "exec fuzz": 33137, "exec gen": 1745, "exec hints": 20271, "exec inject": 0, "exec minimize": 15734, "exec retries": 19, "exec seeds": 1691, "exec smash": 14356, "exec total [base]": 201518, "exec total [new]": 444387, "exec triage": 147517, "executor restarts [base]": 870, "executor restarts [new]": 1862, "fault jobs": 0, "fuzzer jobs": 71, "fuzzing VMs [base]": 1, "fuzzing VMs [new]": 7, "hints jobs": 26, "max signal": 320879, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 9123, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47543, "no exec duration": 118783000000, "no exec requests": 484, "pending": 1, "prog exec time": 699, "reproducing": 2, "rpc recv": 20893583856, "rpc sent": 6564737976, "signal": 308268, "smash jobs": 25, "triage jobs": 20, "vm output": 113363448, "vm restarts [base]": 87, "vm restarts [new]": 198 } 2025/08/27 19:39:46 runner 2 connected 2025/08/27 19:39:54 base crash: lost connection to test machine 2025/08/27 19:40:05 runner 1 connected 2025/08/27 19:40:23 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:40:25 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:40:41 patched crashed: kernel BUG in txUnlock [need repro = false] 2025/08/27 19:40:43 runner 0 connected 2025/08/27 19:40:51 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:41:12 runner 7 connected 2025/08/27 19:41:13 runner 3 connected 2025/08/27 19:41:30 runner 8 connected 2025/08/27 19:41:38 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:41:40 runner 6 connected 2025/08/27 19:41:53 base crash "KASAN: slab-use-after-free Read in l2cap_unregister_user" is already known 2025/08/27 19:41:53 patched crashed: KASAN: slab-use-after-free Read in l2cap_unregister_user [need repro = false] 2025/08/27 19:42:03 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:42:12 base crash "kernel BUG in ocfs2_write_cluster_by_desc" is already known 2025/08/27 19:42:12 patched crashed: kernel BUG in ocfs2_write_cluster_by_desc [need repro = false] 2025/08/27 19:42:28 runner 3 connected 2025/08/27 19:42:35 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:42:43 runner 0 connected 2025/08/27 19:42:50 runner 9 connected 2025/08/27 19:43:02 runner 4 connected 2025/08/27 19:43:09 base crash: possible deadlock in ocfs2_init_acl 2025/08/27 19:43:23 runner 0 connected 2025/08/27 19:43:55 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 19:43:58 runner 3 connected 2025/08/27 19:44:16 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:44:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 797, 
"corpus": 45968, "corpus [files]": 0, "corpus [symbols]": 24881, "cover overflows": 86109, "coverage": 314638, "distributor delayed": 47134, "distributor undelayed": 47134, "distributor violated": 111, "exec candidate": 80039, "exec collide": 18619, "exec fuzz": 34891, "exec gen": 1843, "exec hints": 21751, "exec inject": 0, "exec minimize": 16903, "exec retries": 19, "exec seeds": 1825, "exec smash": 15573, "exec total [base]": 204446, "exec total [new]": 451435, "exec triage": 147737, "executor restarts [base]": 911, "executor restarts [new]": 1913, "fault jobs": 0, "fuzzer jobs": 47, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 6, "hints jobs": 23, "max signal": 321090, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 9766, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47620, "no exec duration": 131827000000, "no exec requests": 500, "pending": 1, "prog exec time": 595, "reproducing": 2, "rpc recv": 21557286836, "rpc sent": 6784832240, "signal": 308382, "smash jobs": 15, "triage jobs": 9, "vm output": 119144327, "vm restarts [base]": 94, "vm restarts [new]": 204 } 2025/08/27 19:44:43 runner 5 connected 2025/08/27 19:45:12 runner 0 connected 2025/08/27 19:45:15 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:45:43 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 19:45:57 base crash: unregister_netdevice: waiting for DEV to become free 2025/08/27 19:46:04 runner 7 connected 2025/08/27 19:46:13 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 19:46:33 runner 2 connected 2025/08/27 19:46:42 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:46:47 runner 1 connected 2025/08/27 19:47:04 runner 8 connected 2025/08/27 19:47:31 runner 3 connected 2025/08/27 19:48:01 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:48:06 base crash: possible deadlock in ocfs2_try_remove_refcount_tree 2025/08/27 19:48:42 base crash: WARNING in xfrm_state_fini 2025/08/27 19:48:50 runner 1 connected 2025/08/27 19:48:56 runner 0 connected 2025/08/27 19:48:57 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false] 2025/08/27 19:49:11 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:64441: connect: connection refused 2025/08/27 19:49:11 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:64441: connect: connection refused 2025/08/27 19:49:18 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:21889: connect: connection refused 2025/08/27 19:49:18 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:21889: connect: connection refused 2025/08/27 19:49:21 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:49:28 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:49:30 runner 2 connected 2025/08/27 19:49:37 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 19:49:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 878, "corpus": 46008, "corpus [files]": 0, "corpus [symbols]": 24906, "cover overflows": 88445, "coverage": 314737, "distributor delayed": 47234, "distributor undelayed": 47234, "distributor violated": 111, "exec candidate": 80039, "exec collide": 19466, "exec fuzz": 36421, "exec gen": 1916, "exec 
hints": 23278, "exec inject": 0, "exec minimize": 17865, "exec retries": 20, "exec seeds": 1936, "exec smash": 16378, "exec total [base]": 207151, "exec total [new]": 457478, "exec triage": 147927, "executor restarts [base]": 948, "executor restarts [new]": 1974, "fault jobs": 0, "fuzzer jobs": 59, "fuzzing VMs [base]": 2, "fuzzing VMs [new]": 4, "hints jobs": 24, "max signal": 321298, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10361, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47695, "no exec duration": 134827000000, "no exec requests": 503, "pending": 1, "prog exec time": 885, "reproducing": 2, "rpc recv": 22131637372, "rpc sent": 6977758248, "signal": 308465, "smash jobs": 19, "triage jobs": 16, "vm output": 126240831, "vm restarts [base]": 101, "vm restarts [new]": 207 } 2025/08/27 19:49:47 runner 4 connected 2025/08/27 19:50:03 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:59474: connect: connection refused 2025/08/27 19:50:03 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:59474: connect: connection refused 2025/08/27 19:50:11 runner 0 connected 2025/08/27 19:50:13 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:50:16 runner 8 connected 2025/08/27 19:50:17 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:63794: connect: connection refused 2025/08/27 19:50:17 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:63794: connect: connection refused 2025/08/27 19:50:21 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:2169: connect: connection refused 2025/08/27 19:50:21 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:2169: connect: connection refused 2025/08/27 19:50:26 runner 3 connected 2025/08/27 19:50:27 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:50:31 base crash: lost connection to test machine 2025/08/27 19:50:31 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:34048: connect: connection refused 2025/08/27 19:50:31 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:34048: connect: connection refused 2025/08/27 19:50:41 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:50:44 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:55345: connect: connection refused 2025/08/27 19:50:44 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:55345: connect: connection refused 2025/08/27 19:50:45 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:33703: connect: connection refused 2025/08/27 19:50:45 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:33703: connect: connection refused 2025/08/27 19:50:50 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:13721: connect: connection refused 2025/08/27 19:50:50 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:13721: connect: connection refused 2025/08/27 19:50:54 base crash: lost connection to test machine 2025/08/27 19:50:55 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:51:00 base crash: lost connection to test machine 2025/08/27 19:51:01 
runner 7 connected 2025/08/27 19:51:06 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:41169: connect: connection refused 2025/08/27 19:51:06 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:41169: connect: connection refused 2025/08/27 19:51:16 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:51:17 runner 5 connected 2025/08/27 19:51:19 runner 1 connected 2025/08/27 19:51:26 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:57732: connect: connection refused 2025/08/27 19:51:26 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:57732: connect: connection refused 2025/08/27 19:51:30 runner 4 connected 2025/08/27 19:51:36 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:51:43 runner 0 connected 2025/08/27 19:51:44 runner 8 connected 2025/08/27 19:51:49 base crash: kernel BUG in f2fs_evict_inode 2025/08/27 19:51:50 runner 3 connected 2025/08/27 19:51:51 patched crashed: general protection fault in device_move [need repro = false] 2025/08/27 19:51:52 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:4558: connect: connection refused 2025/08/27 19:51:52 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:4558: connect: connection refused 2025/08/27 19:51:55 patched crashed: INFO: task hung in v9fs_evict_inode [need repro = false] 2025/08/27 19:52:02 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:52:05 runner 0 connected 2025/08/27 19:52:09 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:48902: connect: connection refused 2025/08/27 19:52:09 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:48902: connect: connection refused 2025/08/27 19:52:19 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:52:25 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:51580: connect: connection refused 2025/08/27 19:52:25 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:51580: connect: connection refused 2025/08/27 19:52:25 runner 7 connected 2025/08/27 19:52:26 base crash: INFO: task hung in bch2_journal_reclaim_thread 2025/08/27 19:52:29 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:43631: connect: connection refused 2025/08/27 19:52:29 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:43631: connect: connection refused 2025/08/27 19:52:32 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:50328: connect: connection refused 2025/08/27 19:52:32 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:50328: connect: connection refused 2025/08/27 19:52:35 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:52:38 runner 1 connected 2025/08/27 19:52:38 runner 6 connected 2025/08/27 19:52:39 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:52:42 base crash: lost connection to test machine 2025/08/27 19:52:42 runner 9 connected 2025/08/27 19:52:51 runner 4 connected 2025/08/27 19:53:08 runner 8 connected 2025/08/27 19:53:14 runner 2 connected 2025/08/27 19:53:18 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:61904: connect: connection refused 2025/08/27 19:53:18 VM-0 
failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:61904: connect: connection refused 2025/08/27 19:53:24 runner 5 connected 2025/08/27 19:53:28 runner 0 connected 2025/08/27 19:53:28 base crash: lost connection to test machine 2025/08/27 19:53:30 runner 3 connected 2025/08/27 19:53:35 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:65343: connect: connection refused 2025/08/27 19:53:35 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:65343: connect: connection refused 2025/08/27 19:53:42 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:25669: connect: connection refused 2025/08/27 19:53:42 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:25669: connect: connection refused 2025/08/27 19:53:45 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:53:52 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:54:16 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:48230: connect: connection refused 2025/08/27 19:54:16 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:48230: connect: connection refused 2025/08/27 19:54:17 runner 0 connected 2025/08/27 19:54:23 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:54:26 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:54:34 runner 8 connected 2025/08/27 19:54:36 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:54906: connect: connection refused 2025/08/27 19:54:36 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:54906: connect: connection refused 2025/08/27 19:54:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 940, "corpus": 46034, "corpus [files]": 0, "corpus [symbols]": 24920, "cover overflows": 89697, "coverage": 314797, "distributor delayed": 47326, "distributor undelayed": 47323, "distributor violated": 111, "exec candidate": 80039, "exec collide": 19842, "exec fuzz": 37115, "exec gen": 1954, "exec hints": 23814, "exec inject": 0, "exec minimize": 18466, "exec retries": 20, "exec seeds": 2014, "exec smash": 16871, "exec total [base]": 209298, "exec total [new]": 460421, "exec triage": 148041, "executor restarts [base]": 990, "executor restarts [new]": 2028, "fault jobs": 0, "fuzzer jobs": 60, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 2, "hints jobs": 27, "max signal": 321352, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10707, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47733, "no exec duration": 135108000000, "no exec requests": 505, "pending": 1, "prog exec time": 807, "reproducing": 2, "rpc recv": 23064349656, "rpc sent": 7148643976, "signal": 308524, "smash jobs": 22, "triage jobs": 11, "vm output": 131728587, "vm restarts [base]": 109, "vm restarts [new]": 223 } 2025/08/27 19:54:40 runner 6 connected 2025/08/27 19:54:46 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:34451: connect: connection refused 2025/08/27 19:54:46 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:34451: connect: connection refused 2025/08/27 19:54:46 base crash: lost connection to test machine 2025/08/27 19:54:53 
VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11628: connect: connection refused 2025/08/27 19:54:53 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11628: connect: connection refused 2025/08/27 19:54:56 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:03 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:12 runner 4 connected 2025/08/27 19:55:16 runner 5 connected 2025/08/27 19:55:23 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:29249: connect: connection refused 2025/08/27 19:55:23 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:29249: connect: connection refused 2025/08/27 19:55:33 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:4048: connect: connection refused 2025/08/27 19:55:33 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:4048: connect: connection refused 2025/08/27 19:55:33 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:35 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:6999: connect: connection refused 2025/08/27 19:55:35 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:6999: connect: connection refused 2025/08/27 19:55:35 runner 2 connected 2025/08/27 19:55:38 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:46238: connect: connection refused 2025/08/27 19:55:38 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:46238: connect: connection refused 2025/08/27 19:55:43 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:44 runner 9 connected 2025/08/27 19:55:45 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:46 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:53948: connect: connection refused 2025/08/27 19:55:46 VM-1 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:53948: connect: connection refused 2025/08/27 19:55:48 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:55:50 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:27585: connect: connection refused 2025/08/27 19:55:50 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:27585: connect: connection refused 2025/08/27 19:55:53 runner 0 connected 2025/08/27 19:55:56 base crash: lost connection to test machine 2025/08/27 19:56:00 base crash: lost connection to test machine 2025/08/27 19:56:00 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:56793: connect: connection refused 2025/08/27 19:56:00 VM-5 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:56793: connect: connection refused 2025/08/27 19:56:01 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:1722: connect: connection refused 2025/08/27 19:56:01 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:1722: connect: connection refused 2025/08/27 19:56:10 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:56:11 base crash: lost connection to test machine 2025/08/27 19:56:14 runner 6 connected 2025/08/27 19:56:26 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 
127.0.0.1:15547: connect: connection refused 2025/08/27 19:56:26 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:15547: connect: connection refused 2025/08/27 19:56:32 runner 7 connected 2025/08/27 19:56:34 runner 4 connected 2025/08/27 19:56:36 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:56:37 runner 8 connected 2025/08/27 19:56:37 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:59762: connect: connection refused 2025/08/27 19:56:37 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:59762: connect: connection refused 2025/08/27 19:56:41 runner 3 connected 2025/08/27 19:56:44 runner 1 connected 2025/08/27 19:56:47 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:56:48 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:56:58 runner 5 connected 2025/08/27 19:57:00 runner 0 connected 2025/08/27 19:57:04 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:17473: connect: connection refused 2025/08/27 19:57:04 VM-7 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:17473: connect: connection refused 2025/08/27 19:57:06 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:30708: connect: connection refused 2025/08/27 19:57:06 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:30708: connect: connection refused 2025/08/27 19:57:07 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:5133: connect: connection refused 2025/08/27 19:57:07 VM-4 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:5133: connect: connection refused 2025/08/27 19:57:14 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:57:16 base crash: lost connection to test machine 2025/08/27 19:57:17 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:57:26 runner 9 connected 2025/08/27 19:57:31 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:17095: connect: connection refused 2025/08/27 19:57:31 VM-8 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:17095: connect: connection refused 2025/08/27 19:57:37 runner 6 connected 2025/08/27 19:57:37 runner 0 connected 2025/08/27 19:57:41 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:58:03 runner 7 connected 2025/08/27 19:58:04 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11860: connect: connection refused 2025/08/27 19:58:04 VM-6 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:11860: connect: connection refused 2025/08/27 19:58:06 runner 3 connected 2025/08/27 19:58:06 runner 4 connected 2025/08/27 19:58:12 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:63677: connect: connection refused 2025/08/27 19:58:12 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:63677: connect: connection refused 2025/08/27 19:58:14 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 19:58:22 base crash: lost connection to test machine 2025/08/27 19:58:30 runner 8 connected 2025/08/27 19:58:39 base crash: lost connection to test machine 2025/08/27 19:58:39 base crash: WARNING in xfrm6_tunnel_net_exit 2025/08/27 19:59:03 runner 6 connected 2025/08/27 
19:59:11 runner 2 connected 2025/08/27 19:59:28 runner 0 connected 2025/08/27 19:59:30 runner 3 connected 2025/08/27 19:59:34 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:9120: connect: connection refused 2025/08/27 19:59:34 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:9120: connect: connection refused 2025/08/27 19:59:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 970, "corpus": 46054, "corpus [files]": 0, "corpus [symbols]": 24929, "cover overflows": 91029, "coverage": 314833, "distributor delayed": 47378, "distributor undelayed": 47378, "distributor violated": 111, "exec candidate": 80039, "exec collide": 20349, "exec fuzz": 38072, "exec gen": 2005, "exec hints": 24709, "exec inject": 0, "exec minimize": 18867, "exec retries": 20, "exec seeds": 2071, "exec smash": 17425, "exec total [base]": 211295, "exec total [new]": 463955, "exec triage": 148129, "executor restarts [base]": 1021, "executor restarts [new]": 2100, "fault jobs": 0, "fuzzer jobs": 38, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 7, "hints jobs": 21, "max signal": 321401, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 10975, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47770, "no exec duration": 141405000000, "no exec requests": 512, "pending": 1, "prog exec time": 604, "reproducing": 2, "rpc recv": 24060544292, "rpc sent": 7329306904, "signal": 308554, "smash jobs": 9, "triage jobs": 8, "vm output": 136117031, "vm restarts [base]": 117, "vm restarts [new]": 240 } 2025/08/27 19:59:44 base crash: lost connection to test machine 2025/08/27 19:59:50 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 19:59:50 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false] 2025/08/27 20:00:26 patched crashed: WARNING in xfrm_state_fini [need repro = false] 2025/08/27 20:00:33 runner 2 connected 2025/08/27 20:00:38 runner 5 connected 2025/08/27 20:00:41 runner 6 connected 2025/08/27 20:00:53 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:19424: connect: connection refused 2025/08/27 20:00:53 VM-3 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:19424: connect: connection refused 2025/08/27 20:00:56 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:55337: connect: connection refused 2025/08/27 20:00:56 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:55337: connect: connection refused 2025/08/27 20:01:03 base crash: lost connection to test machine 2025/08/27 20:01:06 base crash: lost connection to test machine 2025/08/27 20:01:12 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:13975: connect: connection refused 2025/08/27 20:01:12 VM-2 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:13975: connect: connection refused 2025/08/27 20:01:14 runner 7 connected 2025/08/27 20:01:22 base crash: lost connection to test machine 2025/08/27 20:01:51 runner 3 connected 2025/08/27 20:01:55 runner 0 connected 2025/08/27 20:02:12 runner 2 connected 2025/08/27 20:02:17 patched crashed: kernel BUG in may_open [need repro = false] 2025/08/27 20:02:19 patched crashed: kernel BUG in jfs_evict_inode [need repro = false] 2025/08/27 20:02:23 patched crashed: 
2025/08/27 20:03:06 runner 5 connected
2025/08/27 20:03:08 runner 7 connected
2025/08/27 20:03:12 runner 6 connected
2025/08/27 20:03:45 base crash: INFO: task hung in sync_bdevs
2025/08/27 20:04:27 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:32926: connect: connection refused
2025/08/27 20:04:27 VM-9 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:32926: connect: connection refused
2025/08/27 20:04:34 runner 1 connected
2025/08/27 20:04:36 patched crashed: WARNING in dbAdjTree [need repro = false]
2025/08/27 20:04:37 patched crashed: lost connection to test machine [need repro = false]
2025/08/27 20:04:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1011, "corpus": 46082, "corpus [files]": 0, "corpus [symbols]": 24949, "cover overflows": 93476, "coverage": 314878, "distributor delayed": 47446, "distributor undelayed": 47446, "distributor violated": 111, "exec candidate": 80039, "exec collide": 21498, "exec fuzz": 40288, "exec gen": 2129, "exec hints": 26738, "exec inject": 0, "exec minimize": 19673, "exec retries": 21, "exec seeds": 2149, "exec smash": 18138, "exec total [base]": 214563, "exec total [new]": 471211, "exec triage": 148270, "executor restarts [base]": 1056, "executor restarts [new]": 2148, "fault jobs": 0, "fuzzer jobs": 26, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 5, "hints jobs": 13, "max signal": 321567, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 11377, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47823, "no exec duration": 141444000000, "no exec requests": 513, "pending": 1, "prog exec time": 625, "reproducing": 2, "rpc recv": 24621512668, "rpc sent": 7527860944, "signal": 308591, "smash jobs": 4, "triage jobs": 9, "vm output": 140504893, "vm restarts [base]": 122, "vm restarts [new]": 246 }
2025/08/27 20:04:50 base crash: WARNING in dbAdjTree
2025/08/27 20:04:58 base crash: kernel BUG in may_open
2025/08/27 20:05:24 runner 6 connected
2025/08/27 20:05:26 runner 9 connected
2025/08/27 20:05:39 runner 2 connected
2025/08/27 20:05:48 runner 1 connected
2025/08/27 20:06:16 patched crashed: lost connection to test machine [need repro = false]
2025/08/27 20:06:30 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/08/27 20:06:35 base crash "possible deadlock in ocfs2_evict_inode" is already known
2025/08/27 20:06:35 patched crashed: possible deadlock in ocfs2_evict_inode [need repro = false]
2025/08/27 20:07:12 runner 9 connected
2025/08/27 20:07:20 runner 6 connected
2025/08/27 20:07:24 runner 8 connected
2025/08/27 20:07:36 base crash: WARNING in bch2_trans_srcu_unlock
2025/08/27 20:07:53 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:32463: connect: connection refused
2025/08/27 20:07:53 VM-0 failed reading regs: qemu hmp command 'info registers': dial tcp 127.0.0.1:32463: connect: connection refused
2025/08/27 20:08:03 base crash: lost connection to test machine
2025/08/27 20:08:23 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/08/27 20:08:25 runner 3 connected
2025/08/27 20:08:52 runner 0 connected
2025/08/27 20:09:13 runner 8 connected
2025/08/27 20:09:25 patched crashed: WARNING in __rate_control_send_low [need repro = true]
2025/08/27 20:09:25 scheduled a reproduction of 'WARNING in __rate_control_send_low'
2025/08/27 20:09:25 start reproducing 'WARNING in __rate_control_send_low'
2025/08/27 20:09:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1042, "corpus": 46101, "corpus [files]": 0, "corpus [symbols]": 24962, "cover overflows": 96624, "coverage": 314927, "distributor delayed": 47538, "distributor undelayed": 47538, "distributor violated": 111, "exec candidate": 80039, "exec collide": 23026, "exec fuzz": 43320, "exec gen": 2278, "exec hints": 28860, "exec inject": 0, "exec minimize": 20218, "exec retries": 21, "exec seeds": 2208, "exec smash": 18654, "exec total [base]": 218139, "exec total [new]": 479353, "exec triage": 148460, "executor restarts [base]": 1090, "executor restarts [new]": 2201, "fault jobs": 0, "fuzzer jobs": 21, "fuzzing VMs [base]": 4, "fuzzing VMs [new]": 6, "hints jobs": 11, "max signal": 321692, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 11647, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47884, "no exec duration": 150444000000, "no exec requests": 522, "pending": 1, "prog exec time": 691, "reproducing": 3, "rpc recv": 25191151736, "rpc sent": 7755561088, "signal": 308627, "smash jobs": 2, "triage jobs": 8, "vm output": 144756700, "vm restarts [base]": 126, "vm restarts [new]": 252 }
2025/08/27 20:10:12 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false]
2025/08/27 20:11:01 runner 4 connected
2025/08/27 20:11:04 patched crashed: WARNING in xfrm6_tunnel_net_exit [need repro = false]
2025/08/27 20:11:08 base crash: KASAN: slab-use-after-free Read in __xfrm_state_lookup
2025/08/27 20:11:11 base crash: WARNING in xfrm6_tunnel_net_exit
2025/08/27 20:11:29 base crash: KASAN: slab-use-after-free Read in __xfrm_state_lookup
2025/08/27 20:11:52 runner 7 connected
2025/08/27 20:11:57 runner 2 connected
2025/08/27 20:11:59 runner 3 connected
2025/08/27 20:12:10 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false]
2025/08/27 20:12:18 runner 1 connected
2025/08/27 20:13:06 runner 6 connected
2025/08/27 20:13:37 base crash: lost connection to test machine
2025/08/27 20:13:55 patched crashed: unregister_netdevice: waiting for DEV to become free [need repro = false]
2025/08/27 20:14:24 base crash: possible deadlock in ocfs2_try_remove_refcount_tree
2025/08/27 20:14:26 runner 2 connected
2025/08/27 20:14:32 patched crashed: WARNING in xfrm_state_fini [need repro = false]
2025/08/27 20:14:38 STAT { "buffer too small": 2, "candidate triage jobs": 0, "candidates": 0, "comps overflows": 1047, "corpus": 46115, "corpus [files]": 0, "corpus [symbols]": 24969, "cover overflows": 97782, "coverage": 314948, "distributor delayed": 47594, "distributor undelayed": 47593, "distributor violated": 111, "exec candidate": 80039, "exec collide": 24067, "exec fuzz": 45238, "exec gen": 2375, "exec hints": 30017, "exec inject": 0, "exec minimize": 20580, "exec retries": 21, "exec seeds": 2249, "exec smash": 18944, "exec total [base]": 221812, "exec total [new]": 484343, "exec triage": 148549, "executor restarts [base]": 1134, "executor restarts [new]": 2320, "fault jobs": 0, "fuzzer jobs": 15, "fuzzing VMs [base]": 3, "fuzzing VMs [new]": 3, "hints jobs": 7, "max signal": 321777, "minimize: array": 0, "minimize: buffer": 0, "minimize: call": 11887, "minimize: filename": 0, "minimize: integer": 0, "minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47919, "no exec duration": 150989000000, "no exec requests": 524, "pending": 1, "prog exec time": 856, "reproducing": 3, "rpc recv": 25636524108, "rpc sent": 7934499504, "signal": 308648, "smash jobs": 1, "triage jobs": 7, "vm output": 149736584, "vm restarts [base]": 130, "vm restarts [new]": 255 }
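Each "base crash: <title>" and "patched crashed: <title> [need repro = ...]" entry above records one crash attributed to the base or the patched kernel; the end-of-run table further below is essentially a per-title tally of these events. The following is a rough stand-alone Go sketch of such a tally over a saved copy of this log; the log file name is an assumption, while the two line prefixes are taken from the log itself.

```go
// crashtally.go - a minimal sketch (hypothetical, not a syzkaller tool) that
// counts per-title "base crash:" and "patched crashed:" events in a saved
// syz-diff log, roughly reconstructing the end-of-run summary table.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"regexp"
	"sort"
)

func main() {
	f, err := os.Open("syz-diff.log") // hypothetical file holding the log above
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Matches "base crash: <title>" and "patched crashed: <title> [need repro = ...]".
	baseRe := regexp.MustCompile(`base crash: (.+)$`)
	patchedRe := regexp.MustCompile(`patched crashed: (.+?) \[need repro = (?:true|false)\]$`)

	base := map[string]int{}
	patched := map[string]int{}
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024)
	for sc.Scan() {
		line := sc.Text()
		if m := patchedRe.FindStringSubmatch(line); m != nil {
			patched[m[1]]++
		} else if m := baseRe.FindStringSubmatch(line); m != nil {
			base[m[1]]++
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}

	// Print every title seen on either kernel, in a stable order.
	titles := map[string]bool{}
	for t := range base {
		titles[t] = true
	}
	for t := range patched {
		titles[t] = true
	}
	sorted := make([]string, 0, len(titles))
	for t := range titles {
		sorted = append(sorted, t)
	}
	sort.Strings(sorted)
	for _, t := range sorted {
		fmt.Printf("%-60s base=%-3d patched=%-3d\n", t, base[t], patched[t])
	}
}
```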
"minimize: pointer": 0, "minimize: props": 0, "minimize: resource": 0, "modules [base]": 1, "modules [new]": 1, "new inputs": 47919, "no exec duration": 150989000000, "no exec requests": 524, "pending": 1, "prog exec time": 856, "reproducing": 3, "rpc recv": 25636524108, "rpc sent": 7934499504, "signal": 308648, "smash jobs": 1, "triage jobs": 7, "vm output": 149736584, "vm restarts [base]": 130, "vm restarts [new]": 255 } 2025/08/27 20:14:43 runner 9 connected 2025/08/27 20:14:46 patched crashed: lost connection to test machine [need repro = false] 2025/08/27 20:15:13 runner 1 connected 2025/08/27 20:15:21 runner 7 connected 2025/08/27 20:15:37 runner 4 connected 2025/08/27 20:17:22 patched crashed: INFO: task hung in bch2_journal_reclaim_thread [need repro = false] 2025/08/27 20:17:51 patched crashed: possible deadlock in ocfs2_init_acl [need repro = false] 2025/08/27 20:18:12 runner 8 connected 2025/08/27 20:18:13 base crash: KASAN: slab-use-after-free Read in jfs_lazycommit 2025/08/27 20:18:14 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 20:18:41 runner 7 connected 2025/08/27 20:19:01 runner 0 connected 2025/08/27 20:19:02 runner 4 connected 2025/08/27 20:19:04 patched crashed: possible deadlock in ocfs2_try_remove_refcount_tree [need repro = false] 2025/08/27 20:19:33 bug reporting terminated 2025/08/27 20:19:33 status reporting terminated 2025/08/27 20:19:49 syz-diff (base): kernel context loop terminated 2025/08/27 20:20:37 repro finished 'WARNING in __rate_control_send_low', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/08/27 20:21:11 repro finished 'WARNING in bch2_trans_srcu_unlock', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/08/27 20:22:01 repro finished 'INFO: task hung in reg_check_chans_work', repro=false crepro=false desc='' hub=false from_dashboard=false 2025/08/27 20:22:01 syz-diff (new): kernel context loop terminated 2025/08/27 20:22:01 diff fuzzing terminated 2025/08/27 20:22:01 fuzzing is finished 2025/08/27 20:22:01 status at the end: Title On-Base On-Patched INFO: rcu detected stall in do_idle 1 crashes INFO: rcu detected stall in worker_thread 1 crashes INFO: task hung in __closure_sync 1 crashes 1 crashes INFO: task hung in bch2_journal_reclaim_thread 8 crashes 12 crashes INFO: task hung in corrupted 1 crashes INFO: task hung in read_part_sector 1 crashes INFO: task hung in reg_check_chans_work 2 crashes INFO: task hung in sync_bdevs 1 crashes 1 crashes INFO: task hung in v9fs_evict_inode 1 crashes 5 crashes INFO: trying to register non-static key in ocfs2_dlm_shutdown 1 crashes KASAN: slab-out-of-bounds Read in dtSplitPage 1 crashes[reproduced] KASAN: slab-use-after-free Read in __usb_hcd_giveback_urb 1 crashes KASAN: slab-use-after-free Read in __xfrm_state_lookup 3 crashes 4 crashes KASAN: slab-use-after-free Read in jfs_lazycommit 1 crashes 1 crashes KASAN: slab-use-after-free Read in l2cap_unregister_user 1 crashes KASAN: slab-use-after-free Read in xfrm_alloc_spi 4 crashes 15 crashes KASAN: slab-use-after-free Read in xfrm_state_find 2 crashes 2 crashes WARNING in __linkwatch_sync_dev 1 crashes WARNING in __rate_control_send_low 1 crashes WARNING in bch2_trans_srcu_unlock 1 crashes 1 crashes WARNING in dbAdjTree 5 crashes 10 crashes WARNING in io_ring_exit_work 1 crashes WARNING in udf_setsize 1 crashes WARNING in xfrm6_tunnel_net_exit 8 crashes 11 crashes WARNING in xfrm_state_fini 7 crashes 20 crashes general protection fault in device_move 1 crashes 2 
crashes general protection fault in pcl818_ai_cancel 5 crashes 4 crashes kernel BUG in f2fs_evict_inode 1 crashes kernel BUG in jfs_evict_inode 3 crashes 9 crashes kernel BUG in may_open 3 crashes 3 crashes kernel BUG in ocfs2_set_new_buffer_uptodate 1 crashes 2 crashes kernel BUG in ocfs2_write_cluster_by_desc 2 crashes kernel BUG in txUnlock 1 crashes 7 crashes lost connection to test machine 28 crashes 65 crashes no output from test machine 1 crashes possible deadlock in __netdev_update_features 1 crashes possible deadlock in mark_as_free_ex 1 crashes possible deadlock in ocfs2_calc_xattr_init 2 crashes possible deadlock in ocfs2_evict_inode 1 crashes possible deadlock in ocfs2_init_acl 13 crashes 16 crashes possible deadlock in ocfs2_reserve_local_alloc_bits 2 crashes possible deadlock in ocfs2_reserve_suballoc_bits 1 crashes 5 crashes possible deadlock in ocfs2_setattr 1 crashes possible deadlock in ocfs2_try_remove_refcount_tree 16 crashes 24 crashes possible deadlock in ocfs2_xattr_set 1 crashes possible deadlock in run_unpack_ex 3 crashes 2 crashes unregister_netdevice: waiting for DEV to become free 5 crashes 9 crashes
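The final table compares per-title crash counts between the base and the patched kernel, which is the signal syz-diff is after. Below is a small Go sketch (hypothetical, not a syzkaller tool) that reads rows in the form "<title> <N> crashes <M> crashes" from stdin, treats the first count as On-Base and the second as On-Patched per the table header, and sorts titles by the patched-minus-base difference; rows carrying only a single "<N> crashes" cell are skipped, since this plain-text dump does not indicate which column such a count belongs to.

```go
// tabledelta.go - a minimal sketch that ranks rows of the end-of-run table by
// how many more crashes were observed on the patched kernel than on the base
// kernel. Rows are read from stdin, one table row per line.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
	"strconv"
)

type row struct {
	title         string
	base, patched int
}

func main() {
	// Only rows with both an On-Base and an On-Patched count are considered.
	re := regexp.MustCompile(`^(.*?) (\d+) crashes (\d+) crashes$`)
	var rows []row
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		m := re.FindStringSubmatch(sc.Text())
		if m == nil {
			continue // header, single-count rows, or unrelated lines
		}
		b, _ := strconv.Atoi(m[2])
		p, _ := strconv.Atoi(m[3])
		rows = append(rows, row{title: m[1], base: b, patched: p})
	}
	sort.Slice(rows, func(i, j int) bool {
		return rows[i].patched-rows[i].base > rows[j].patched-rows[j].base
	})
	for _, r := range rows {
		fmt.Printf("%-60s base=%-3d patched=%-3d delta=%+d\n",
			r.title, r.base, r.patched, r.patched-r.base)
	}
}
```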