Vendor-specific VFIO driver for Xe will implement VF migration.
Export everything that's needed for migration ops.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/xe/Makefile        |   2 +
 drivers/gpu/drm/xe/xe_sriov_vfio.c | 248 +++++++++++++++++++++++++++++
 include/drm/intel/xe_sriov_vfio.h  |  30 ++++
 3 files changed, 280 insertions(+)
 create mode 100644 drivers/gpu/drm/xe/xe_sriov_vfio.c
 create mode 100644 include/drm/intel/xe_sriov_vfio.h

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3d72db9e528e4..de3778f51ce7e 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -182,6 +182,8 @@ xe-$(CONFIG_PCI_IOV) += \
 	xe_sriov_pf_service.o \
 	xe_tile_sriov_pf_debugfs.o
 
+xe-$(CONFIG_XE_VFIO_PCI) += xe_sriov_vfio.o
+
 # include helpers for tests even when XE is built-in
 ifdef CONFIG_DRM_XE_KUNIT_TEST
 xe-y += tests/xe_kunit_helpers.o
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
new file mode 100644
index 0000000000000..9b02a86132e8b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/intel/xe_sriov_vfio.h>
+
+#include "xe_assert.h"
+#include "xe_pci.h"
+#include "xe_pm.h"
+#include "xe_sriov_migration_data.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+
+/**
+ * xe_sriov_vfio_get_xe_device() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_sriov_vfio_get_xe_device(struct pci_dev *pdev)
+{
+	return xe_pci_get_pf_xe_device(pdev);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_xe_device, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_migration_supported() - Check if migration is supported.
+ * @xe: the PF &xe_device
+ *
+ * Return: true if migration is supported, false otherwise.
+ */
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
+{
+	return xe_sriov_pf_migration_supported(xe);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will wait until VF FLR is processed by PF on all tiles (or
+ * until timeout occurs).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_wait_flr(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_wait_flr_done, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_suspend_device() - Suspend VF.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will pause VF on all tiles/GTs.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_pause_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_suspend_device, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_resume_device() - Resume VF.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will resume VF on all tiles.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_resume_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_device, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_trigger_save_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_enter, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_finish_save_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_exit, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_trigger_restore_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_data_enter, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_finish_restore_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_data_exit, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_error() - Move VF device to error state.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Reset is needed to move it out of error state.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_control_stop_vf(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_error, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_data_read() - Read migration data from the VF device.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that has been successfully read,
+ *         0 if no more migration data is available, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	return xe_sriov_migration_data_read(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_data_write() - Write migration data to the VF device.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that has been successfully written, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	return xe_sriov_migration_data_write(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
+
+/**
+ * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid)
+{
+	if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe))
+		return -EINVAL;
+
+	xe_assert(xe, !xe_pm_runtime_suspended(xe));
+
+	return xe_sriov_pf_migration_size(xe, vfid);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_size, "xe-vfio-pci");
diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h
new file mode 100644
index 0000000000000..5b02f66465ed9
--- /dev/null
+++ b/include/drm/intel/xe_sriov_vfio.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VFIO_H_
+#define _XE_SRIOV_VFIO_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct xe_device;
+
+struct xe_device *xe_sriov_vfio_get_xe_device(struct pci_dev *pdev);
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
+int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len);
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len);
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);
+
+#endif
-- 
2.50.1