Since commit 4eb0aab6e4434 ("drm/xe/guc: Bump minimum required GuC version to v70.29.2"), the minimum GuC version required by the driver is v70.29.2, which should already include everything that we need for migration. Remove the version check. Suggested-by: Michal Wajdeczko Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 44cc612b0a752..a5bf327ef8889 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -384,9 +384,6 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int static bool pf_check_migration_support(struct xe_gt *gt) { - /* GuC 70.25 with save/restore v2 is required */ - xe_gt_assert(gt, GUC_FIRMWARE_VER(>->uc.guc) >= MAKE_GUC_VER(70, 25, 0)); - /* XXX: for now this is for feature enabling only */ return IS_ENABLED(CONFIG_DRM_XE_DEBUG); } -- 2.50.1 Upcoming changes will allow users to control VF state and obtain its migration data with a device-level granularity (not tile/gt). Change the data structures to reflect that and move the GT-level migration init to happen after device-level init. Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/Makefile | 1 + drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 12 +----- .../drm/xe/xe_gt_sriov_pf_migration_types.h | 3 -- drivers/gpu/drm/xe/xe_sriov_pf.c | 5 +++ drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 41 +++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_pf_migration.h | 16 ++++++++ .../gpu/drm/xe/xe_sriov_pf_migration_types.h | 0 drivers/gpu/drm/xe/xe_sriov_pf_types.h | 6 +++ 8 files changed, 71 insertions(+), 13 deletions(-) create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf_migration.c create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf_migration.h create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 82c6b3d296769..89e5b26c27975 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -176,6 +176,7 @@ xe-$(CONFIG_PCI_IOV) += \ xe_sriov_pf.o \ xe_sriov_pf_control.o \ xe_sriov_pf_debugfs.o \ + xe_sriov_pf_migration.o \ xe_sriov_pf_provision.o \ xe_sriov_pf_service.o \ xe_tile_sriov_pf_debugfs.o diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index a5bf327ef8889..ca28f45aaf481 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -13,6 +13,7 @@ #include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_sriov.h" +#include "xe_sriov_pf_migration.h" /* Return: number of dwords saved/restored/required or a negative error code on failure */ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, @@ -115,8 +116,7 @@ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, static bool pf_migration_supported(struct xe_gt *gt) { - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - return gt->sriov.pf.migration.supported; + return xe_sriov_pf_migration_supported(gt_to_xe(gt)); } static struct mutex *pf_migration_mutex(struct xe_gt *gt) @@ -382,12 +382,6 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int } #endif /* CONFIG_DEBUG_FS */ -static bool pf_check_migration_support(struct xe_gt *gt) -{ - /* XXX: for now this is for 
feature enabling only */ - return IS_ENABLED(CONFIG_DRM_XE_DEBUG); -} - /** * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration. * @gt: the &xe_gt @@ -403,8 +397,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) xe_gt_assert(gt, IS_SRIOV_PF(xe)); - gt->sriov.pf.migration.supported = pf_check_migration_support(gt); - if (!pf_migration_supported(gt)) return 0; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 1f3110b6d44fa..9d672feac5f04 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -30,9 +30,6 @@ struct xe_gt_sriov_state_snapshot { * Used by the PF driver to maintain non-VF specific per-GT data. */ struct xe_gt_sriov_pf_migration { - /** @supported: indicates whether the feature is supported */ - bool supported; - /** @snapshot_lock: protects all VFs snapshots */ struct mutex snapshot_lock; }; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c index bc1ab9ee31d92..95743c7af8050 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf.c @@ -15,6 +15,7 @@ #include "xe_sriov.h" #include "xe_sriov_pf.h" #include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_service.h" #include "xe_sriov_printk.h" @@ -101,6 +102,10 @@ int xe_sriov_pf_init_early(struct xe_device *xe) if (err) return err; + err = xe_sriov_pf_migration_init(xe); + if (err) + return err; + xe_sriov_pf_service_init(xe); return 0; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c new file mode 100644 index 0000000000000..8c523c392f98b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "xe_sriov.h" +#include "xe_sriov_pf_migration.h" + +/** + * xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device + * @xe: the &xe_device + * + * Return: true if migration is supported, false otherwise + */ +bool xe_sriov_pf_migration_supported(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + return xe->sriov.pf.migration.supported; +} + +static bool pf_check_migration_support(struct xe_device *xe) +{ + /* XXX: for now this is for feature enabling only */ + return IS_ENABLED(CONFIG_DRM_XE_DEBUG); +} + +/** + * xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration. + * @xe: the &xe_device + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_pf_migration_init(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + + xe->sriov.pf.migration.supported = pf_check_migration_support(xe); + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h new file mode 100644 index 0000000000000..d2b4a24165438 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_MIGRATION_H_ +#define _XE_SRIOV_PF_MIGRATION_H_ + +#include + +struct xe_device; + +int xe_sriov_pf_migration_init(struct xe_device *xe); +bool xe_sriov_pf_migration_supported(struct xe_device *xe); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h index c753cd59aed2b..24d22afeececa 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h @@ -39,6 +39,12 @@ struct xe_device_pf { /** @provision: device level provisioning data. */ struct xe_sriov_pf_provision provision; + /** @migration: device level VF migration data */ + struct { + /** @migration.supported: indicates whether VF migration feature is supported */ + bool supported; + } migration; + /** @service: device level service data. */ struct xe_sriov_pf_service service; -- 2.50.1 In upcoming changes, the number of states will increase as a result of introducing SAVE and RESTORE states. This means that using unsigned long as underlying storage won't work on 32-bit architectures, as we'll run out of bits. Use bitmap instead. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202510231918.XlOqymLC-lkp@intel.com/ Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 2 +- drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 9de05db1f0905..8a2577fda4198 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -225,7 +225,7 @@ static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); - return &cs->state; + return cs->state; } static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index c80b7e77f1ad2..3ba6ad886c939 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -73,7 +73,8 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_STOP_FAILED, XE_GT_SRIOV_STATE_STOPPED, - XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1, + XE_GT_SRIOV_STATE_MISMATCH, + XE_GT_SRIOV_STATE_MAX, }; /** @@ -83,7 +84,7 @@ enum xe_gt_sriov_control_bits { */ struct xe_gt_sriov_control_state { /** @state: VF state bits */ - unsigned long state; + DECLARE_BITMAP(state, XE_GT_SRIOV_STATE_MAX); /** @done: completion of async operations */ struct completion done; -- 2.50.1 The states will be used by upcoming changes to produce (in case of save) or consume (in case of resume) the VF migration data. 
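For illustration, the expected PF-side call flow could look roughly like the
sketch below. The wrapper function is hypothetical and error handling is
simplified; it only strings together the device-level helpers introduced by
this patch, and it assumes the VF was already paused (for example through the
existing per-VF "pause" debugfs file), since a save or restore can only be
triggered on a paused VF:

  /* Hypothetical caller; assumes the VF is already paused. */
  static int pf_save_vf_example(struct xe_device *xe, unsigned int vfid)
  {
  	int err;

  	/* Enter SAVE_WIP and queue the save on all GTs. */
  	err = xe_sriov_pf_control_trigger_save_vf(xe, vfid);
  	if (err)
  		return err;

  	/* Verify that every GT reached the SAVED state. */
  	return xe_sriov_pf_control_finish_save_vf(xe, vfid);
  }

The restore path mirrors this with xe_sriov_pf_control_trigger_restore_vf()
and xe_sriov_pf_control_finish_restore_vf(). The per-VF "save" and "restore"
debugfs files added in this patch map a write to the trigger call and a read
to the corresponding finish call.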
Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 248 ++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 6 + .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 14 + drivers/gpu/drm/xe/xe_sriov_pf_control.c | 96 +++++++ drivers/gpu/drm/xe/xe_sriov_pf_control.h | 4 + drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c | 38 +++ 6 files changed, 406 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 8a2577fda4198..a571e1c02a3b4 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -184,6 +184,12 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_SAVE_GUC); CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); + CASE2STR(SAVE_WIP); + CASE2STR(SAVE_FAILED); + CASE2STR(SAVED); + CASE2STR(RESTORE_WIP); + CASE2STR(RESTORE_FAILED); + CASE2STR(RESTORED); CASE2STR(RESUME_WIP); CASE2STR(RESUME_SEND_RESUME); CASE2STR(RESUME_FAILED); @@ -208,6 +214,8 @@ static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit) case XE_GT_SRIOV_STATE_FLR_WIP: case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: return 5 * HZ; + case XE_GT_SRIOV_STATE_RESTORE_WIP: + return 20 * HZ; default: return HZ; } @@ -329,6 +337,8 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED); } #define pf_enter_vf_state_machine_bug(gt, vfid) ({ \ @@ -359,6 +369,8 @@ static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid) static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid); static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid); @@ -380,6 +392,8 @@ static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_flr_wip(gt, vfid); pf_exit_vf_stop_wip(gt, vfid); + pf_exit_vf_save_wip(gt, vfid); + pf_exit_vf_restore_wip(gt, vfid); pf_exit_vf_pause_wip(gt, vfid); pf_exit_vf_resume_wip(gt, vfid); @@ -399,6 +413,8 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -675,6 +691,8 @@ static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid) { pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -753,6 +771,16 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) return -EPERM; } + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u 
save is in progress!\n", vfid); + return -EBUSY; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid); + return -EBUSY; + } + if (!pf_enter_vf_resume_wip(gt, vfid)) { xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid); return -EALREADY; @@ -776,6 +804,218 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) return -ECANCELED; } +static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid) +{ + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP); +} + +static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + xe_gt_sriov_dbg(gt, "VF%u saved!\n", vfid); + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static bool pf_handle_vf_save(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) + return false; + + pf_enter_vf_saved(gt, vfid); + + return true; +} + +static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_queue_vf(gt, vfid); + return true; + } + + return false; +} + +/** + * xe_gt_sriov_pf_control_trigger_save_vf() - Start an SR-IOV VF migration data save sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid); + return -EPERM; + } + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid); + return -EPERM; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid); + return -EBUSY; + } + + if (!pf_enter_vf_save_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u save already in progress!\n", vfid); + return -EALREADY; + } + + return 0; +} + +/** + * xe_gt_sriov_pf_control_finish_save_vf() - Complete a VF migration data save sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED)) { + pf_enter_vf_mismatch(gt, vfid); + return -EIO; + } + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + + return 0; +} + +static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) +{ + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP); +} + +static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + xe_gt_sriov_dbg(gt, "VF%u restored!\n", vfid); + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) + return false; + + pf_enter_vf_restored(gt, vfid); + + return true; +} + +static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_queue_vf(gt, vfid); + return true; + } + + return false; +} + +/** + * xe_gt_sriov_pf_control_trigger restore_vf() - Start an SR-IOV VF migration data restore sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid); + return -EPERM; + } + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid); + return -EPERM; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid); + return -EBUSY; + } + + if (!pf_enter_vf_restore_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u restore already in progress!\n", vfid); + return -EALREADY; + } + + return 0; +} + +static int pf_wait_vf_restore_done(struct xe_gt *gt, unsigned int vfid) +{ + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESTORE_WIP); + int err; + + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) { + xe_gt_sriov_notice(gt, "VF%u RESTORE didn't finish in %u ms (%pe)\n", + vfid, jiffies_to_msecs(timeout), ERR_PTR(err)); + return err; + } + + if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) + return -EIO; + + return 0; +} + +/** + * xe_gt_sriov_pf_control_finish_restore_vf() - Complete a VF migration data restore sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + ret = pf_wait_vf_restore_done(gt, vfid); + if (ret) + return ret; + } + + if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) { + pf_enter_vf_mismatch(gt, vfid); + return -EIO; + } + + pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + + return 0; +} + /** * DOC: The VF STOP state machine * @@ -817,6 +1057,8 @@ static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED); pf_exit_vf_mismatch(gt, vfid); pf_exit_vf_wip(gt, vfid); } @@ -1463,6 +1705,12 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_save_guc(gt, vfid)) return true; + if (pf_handle_vf_save(gt, vfid)) + return true; + + if (pf_handle_vf_restore(gt, vfid)) + return true; + if (pf_exit_vf_resume_send_resume(gt, vfid)) return true; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h index 8a72ef3778d47..abc233f6302ed 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h @@ -14,8 +14,14 @@ struct xe_gt; int xe_gt_sriov_pf_control_init(struct xe_gt *gt); void xe_gt_sriov_pf_control_restart(struct xe_gt *gt); +bool xe_gt_sriov_pf_control_check_vf_data_wip(struct xe_gt *gt, unsigned int vfid); + int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index 3ba6ad886c939..ea16ceb39dabb 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -31,6 +31,12 @@ * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state. * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. + * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress. + * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed. + * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved. + * @XE_GT_SRIOV_STATE_RESTORE_WIP: indicates that VF restore operation is in progress. + * @XE_GT_SRIOV_STATE_RESTORE_FAILED: indicates that VF restore operation has failed. + * @XE_GT_SRIOV_STATE_RESTORED: indicates that VF data is restored. * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress. * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command. 
* @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed. @@ -63,6 +69,14 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSE_FAILED, XE_GT_SRIOV_STATE_PAUSED, + XE_GT_SRIOV_STATE_SAVE_WIP, + XE_GT_SRIOV_STATE_SAVE_FAILED, + XE_GT_SRIOV_STATE_SAVED, + + XE_GT_SRIOV_STATE_RESTORE_WIP, + XE_GT_SRIOV_STATE_RESTORE_FAILED, + XE_GT_SRIOV_STATE_RESTORED, + XE_GT_SRIOV_STATE_RESUME_WIP, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME, XE_GT_SRIOV_STATE_RESUME_FAILED, diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c index 416d00a03fbb7..8d8a01faf5291 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -149,3 +149,99 @@ int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid) return 0; } + +/** + * xe_sriov_pf_control_trigger_save_vf - Start a VF migration data SAVE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid); + if (ret) + return ret; + } + + return 0; +} + +/** + * xe_sriov_pf_control_finish_save_vf - Complete a VF migration data SAVE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_finish_save_vf(gt, vfid); + if (ret) + break; + } + + return ret; +} + +/** + * xe_sriov_pf_control_trigger_restore_vf - Start a VF migration data RESTORE sequence on all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_trigger_restore_vf(gt, vfid); + if (ret) + return ret; + } + + return ret; +} + +/** + * xe_sriov_pf_control_wait_restore_vf - Complete a VF migration data RESTORE sequence in all GTs. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int ret; + + for_each_gt(gt, xe, id) { + ret = xe_gt_sriov_pf_control_finish_restore_vf(gt, vfid); + if (ret) + break; + } + + return ret; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h index 2d52d0ac1b28f..30318c1fba34e 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h @@ -13,5 +13,9 @@ int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c index a81aa05c55326..e0e6340c49106 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -136,11 +136,31 @@ static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent) * │ │ ├── reset * │ │ ├── resume * │ │ ├── stop + * │ │ ├── save + * │ │ ├── restore * │ │ : * │ ├── vf2 * │ │ ├── ... */ +static int from_file_read_to_vf_call(struct seq_file *s, + int (*call)(struct xe_device *, unsigned int)) +{ + struct dentry *dent = file_dentry(s->file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + int ret; + + xe_pm_runtime_get(xe); + ret = call(xe, vfid); + xe_pm_runtime_put(xe); + + if (ret < 0) + return ret; + + return 0; +} + static ssize_t from_file_write_to_vf_call(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos, int (*call)(struct xe_device *, unsigned int)) @@ -179,10 +199,26 @@ static ssize_t OP##_write(struct file *file, const char __user *userbuf, \ } \ DEFINE_SHOW_STORE_ATTRIBUTE(OP) +#define DEFINE_VF_CONTROL_ATTRIBUTE_RW(OP) \ +static int OP##_show(struct seq_file *s, void *unused) \ +{ \ + return from_file_read_to_vf_call(s, \ + xe_sriov_pf_control_finish_##OP); \ +} \ +static ssize_t OP##_write(struct file *file, const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return from_file_write_to_vf_call(file, userbuf, count, ppos, \ + xe_sriov_pf_control_trigger_##OP); \ +} \ +DEFINE_SHOW_STORE_ATTRIBUTE(OP) + DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf); DEFINE_VF_CONTROL_ATTRIBUTE(resume_vf); DEFINE_VF_CONTROL_ATTRIBUTE(stop_vf); DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf); +DEFINE_VF_CONTROL_ATTRIBUTE_RW(save_vf); +DEFINE_VF_CONTROL_ATTRIBUTE_RW(restore_vf); static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) { @@ -190,6 +226,8 @@ static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) debugfs_create_file("resume", 0200, vfdent, xe, &resume_vf_fops); debugfs_create_file("stop", 0200, vfdent, xe, &stop_vf_fops); debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops); + debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops); + debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops); } static void pf_populate_with_tiles(struct xe_device *xe, struct 
dentry *dent, unsigned int vfid) -- 2.50.1 Migration data is queued in a per-GT ptr_ring to decouple the worker responsible for handling the data transfer from the .read() and .write() syscalls. Add the data structures and handlers that will be used in future commits. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 271 +++++++++++++++++- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 6 +- .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 12 + drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 183 ++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 14 + .../drm/xe/xe_gt_sriov_pf_migration_types.h | 11 + drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 3 + drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 142 +++++++++ drivers/gpu/drm/xe/xe_sriov_pf_migration.h | 7 + .../gpu/drm/xe/xe_sriov_pf_migration_types.h | 58 ++++ drivers/gpu/drm/xe/xe_sriov_pf_types.h | 3 + 11 files changed, 695 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index a571e1c02a3b4..ad9dcd3f56453 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -19,6 +19,7 @@ #include "xe_guc_ct.h" #include "xe_sriov.h" #include "xe_sriov_pf_control.h" +#include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_service.h" #include "xe_tile.h" @@ -185,9 +186,15 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); CASE2STR(SAVE_WIP); + CASE2STR(SAVE_PROCESS_DATA); + CASE2STR(SAVE_WAIT_DATA); + CASE2STR(SAVE_DATA_DONE); CASE2STR(SAVE_FAILED); CASE2STR(SAVED); CASE2STR(RESTORE_WIP); + CASE2STR(RESTORE_PROCESS_DATA); + CASE2STR(RESTORE_WAIT_DATA); + CASE2STR(RESTORE_DATA_DONE); CASE2STR(RESTORE_FAILED); CASE2STR(RESTORED); CASE2STR(RESUME_WIP); @@ -804,9 +811,50 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) return -ECANCELED; } +/** + * DOC: The VF SAVE state machine + * + * SAVE extends the PAUSED state. + * + * The VF SAVE state machine looks like:: + * + * ....PAUSED.................................................... + * : : + * : (O)<---------o : + * : | \ : + * : save (SAVED) (SAVE_FAILED) : + * : | ^ ^ : + * : | | | : + * : ....V...............o...........o......SAVE_WIP......... : + * : : | | | : : + * : : | empty | : : + * : : | | | : : + * : : | | | : : + * : : | DATA_DONE | : : + * : : | ^ | : : + * : : | | error : : + * : : | no_data / : : + * : : | / / : : + * : : | / / : : + * : : | / / : : + * : : o---------->PROCESS_DATA<----consume : : + * : : \ \ : : + * : : \ \ : : + * : : \ \ : : + * : : ring_full----->WAIT_DATA : : + * : : : : + * : :......................................................: : + * :............................................................: + * + * For the full state machine view, see `The VF state machine`_. 
+ */ static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid) { - pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP); + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); + } } static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid) @@ -821,19 +869,54 @@ static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_wip(gt, vfid); } +static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED); + pf_exit_vf_wip(gt, vfid); +} + +static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) +{ + return 0; +} + static bool pf_handle_vf_save(struct xe_gt *gt, unsigned int vfid) { - if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) + int ret; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA)) return false; - pf_enter_vf_saved(gt, vfid); + if (xe_gt_sriov_pf_migration_ring_full(gt, vfid)) { + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA); + + return true; + } + + ret = pf_handle_vf_save_data(gt, vfid); + if (ret == -EAGAIN) + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + else if (ret) + pf_enter_vf_save_failed(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); return true; } +static void pf_exit_vf_save_wait_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) + return; + + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); + pf_queue_vf(gt, vfid); +} + static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); pf_enter_vf_wip(gt, vfid); pf_queue_vf(gt, vfid); return true; @@ -842,6 +925,32 @@ static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid) return false; } +/** + * xe_gt_sriov_pf_control_check_save_data_done() - Check if all save migration data was produced. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid) +{ + return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); +} + +/** + * xe_gt_sriov_pf_control_process_save_data() - Queue VF save migration data processing. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + */ +void xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid) +{ + pf_exit_vf_save_wait_data(gt, vfid); +} + /** * xe_gt_sriov_pf_control_trigger_save_vf() - Start an SR-IOV VF migration data save sequence. 
* @gt: the &xe_gt @@ -887,19 +996,62 @@ int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid) { - if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED)) { - pf_enter_vf_mismatch(gt, vfid); + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE)) { + xe_gt_sriov_err(gt, "VF%u save is still in progress!\n", vfid); return -EIO; } pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); + pf_enter_vf_saved(gt, vfid); return 0; } +/** + * DOC: The VF RESTORE state machine + * + * RESTORE extends the PAUSED state. + * + * The VF RESTORE state machine looks like:: + * + * ....PAUSED.................................................... + * : : + * : (O)<---------o : + * : | \ : + * : restore (RESTORED) (RESTORE_FAILED) : + * : | ^ ^ : + * : | | | : + * : ....V...............o...........o......RESTORE_WIP...... : + * : : | | | : : + * : : | empty | : : + * : : | | | : : + * : : | | | : : + * : : | DATA_DONE | : : + * : : | ^ | : : + * : : | | error : : + * : : | trailer / : : + * : : | / / : : + * : : | / / : : + * : : | / / : : + * : : o---------->PROCESS_DATA<----produce : : + * : : \ \ : : + * : : \ \ : : + * : : \ \ : : + * : : ring_empty---->WAIT_DATA : : + * : : : : + * : :......................................................: : + * :............................................................: + * + * For the full state machine view, see `The VF state machine`_. + */ static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) { - pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP); + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE); + } } static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid) @@ -914,19 +1066,61 @@ static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid) pf_exit_vf_wip(gt, vfid); } +static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED); + pf_exit_vf_wip(gt, vfid); +} + +static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_sriov_migration_data *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid); + + xe_gt_assert(gt, data); + + xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type); + + return 0; +} + static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid) { - if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) + int ret; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA)) return false; - pf_enter_vf_restored(gt, vfid); + if (xe_gt_sriov_pf_migration_ring_empty(gt, vfid)) { + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) + pf_enter_vf_restored(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA); + + return true; + } + + ret = pf_handle_vf_restore_data(gt, vfid); + if (ret) + pf_enter_vf_restore_failed(gt, vfid); + else + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); return true; } +static void pf_exit_vf_restore_wait_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) + return; 
+ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); + pf_queue_vf(gt, vfid); +} + static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); pf_enter_vf_wip(gt, vfid); pf_queue_vf(gt, vfid); return true; @@ -935,6 +1129,47 @@ static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) return false; } +/** + * xe_gt_sriov_pf_control_restore_data_done() - Indicate the end of VF migration data stream. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + pf_enter_vf_state_machine_bug(gt, vfid); + return -EIO; + } + + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) { + pf_enter_vf_state_machine_bug(gt, vfid); + return -EIO; + } + + xe_gt_sriov_pf_control_process_restore_data(gt, vfid); + + return 0; +} + +/** + * xe_gt_sriov_pf_control_process_restore_data() - Queue VF restore migration data processing. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + */ +void xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_exit_vf_restore_wait_data(gt, vfid); +} + /** * xe_gt_sriov_pf_control_trigger restore_vf() - Start an SR-IOV VF migration data restore sequence. * @gt: the &xe_gt @@ -1000,11 +1235,9 @@ int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid { int ret; - if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { - ret = pf_wait_vf_restore_done(gt, vfid); - if (ret) - return ret; - } + ret = pf_wait_vf_restore_done(gt, vfid); + if (ret) + return ret; if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) { pf_enter_vf_mismatch(gt, vfid); @@ -1705,9 +1938,21 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_save_guc(gt, vfid)) return true; + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)); + return false; + } + if (pf_handle_vf_save(gt, vfid)) return true; + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)); + return false; + } + if (pf_handle_vf_restore(gt, vfid)) return true; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h index abc233f6302ed..6b1ab339e3b73 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h @@ -14,12 +14,14 @@ struct xe_gt; int xe_gt_sriov_pf_control_init(struct xe_gt *gt); void xe_gt_sriov_pf_control_restart(struct xe_gt *gt); -bool xe_gt_sriov_pf_control_check_vf_data_wip(struct xe_gt *gt, unsigned int vfid); - int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid); +void 
xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index ea16ceb39dabb..a14c738581ae2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -32,9 +32,15 @@ * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress. + * @XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA: indicates that VF migration data is being produced. + * @XE_GT_SRIOV_STATE_SAVE_WAIT_DATA: indicates that PF awaits for space in migration data ring. + * @XE_GT_SRIOV_STATE_SAVE_DATA_DONE: indicates that all migration data was produced by Xe. * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed. * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved. * @XE_GT_SRIOV_STATE_RESTORE_WIP: indicates that VF restore operation is in progress. + * @XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA: indicates that VF migration data is being consumed. + * @XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA: indicates that PF awaits for data in migration data ring. + * @XE_GT_SRIOV_STATE_RESTORE_DATA_DONE: indicates that all migration data was produced by the user. * @XE_GT_SRIOV_STATE_RESTORE_FAILED: indicates that VF restore operation has failed. * @XE_GT_SRIOV_STATE_RESTORED: indicates that VF data is restored. * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress. 
@@ -70,10 +76,16 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSED, XE_GT_SRIOV_STATE_SAVE_WIP, + XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA, + XE_GT_SRIOV_STATE_SAVE_WAIT_DATA, + XE_GT_SRIOV_STATE_SAVE_DATA_DONE, XE_GT_SRIOV_STATE_SAVE_FAILED, XE_GT_SRIOV_STATE_SAVED, XE_GT_SRIOV_STATE_RESTORE_WIP, + XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA, + XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA, + XE_GT_SRIOV_STATE_RESTORE_DATA_DONE, XE_GT_SRIOV_STATE_RESTORE_FAILED, XE_GT_SRIOV_STATE_RESTORED, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index ca28f45aaf481..e61e6f4215864 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -7,6 +7,7 @@ #include "abi/guc_actions_sriov_abi.h" #include "xe_bo.h" +#include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_printk.h" @@ -15,6 +16,17 @@ #include "xe_sriov.h" #include "xe_sriov_pf_migration.h" +#define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5 + +static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return >->sriov.pf.vfs[vfid].migration; +} + /* Return: number of dwords saved/restored/required or a negative error code on failure */ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, u64 addr, u32 ndwords) @@ -382,6 +394,162 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int } #endif /* CONFIG_DEBUG_FS */ +/** + * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Return: true if the ring is empty, otherwise false. + */ +bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid) +{ + return ptr_ring_empty(&pf_pick_gt_migration(gt, vfid)->ring); +} + +/** + * xe_gt_sriov_pf_migration_ring_full() - Check if a migration ring is full. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Return: true if the ring is full, otherwise false. + */ +bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid) +{ + return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring); +} + +/** + * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @data: &xe_sriov_migration_data packet + * + * Called by the save migration data producer (PF SR-IOV Control worker) when + * processing migration data. + * Wakes up the save migration data consumer (userspace), that is potentially + * waiting for data when the ring is empty. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + int ret; + + ret = ptr_ring_produce(&pf_pick_gt_migration(gt, vfid)->ring, data); + if (ret) + return ret; + + wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid)); + + return 0; +} + +/** + * xe_gt_sriov_pf_migration_restore_consume() - Get VF restore data packet from migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Called by the restore migration data consumer (PF SR-IOV Control worker) when + * processing migration data. 
+ * Wakes up the restore migration data producer (userspace), that is + * potentially waiting to add more data when the ring is full. + * + * Return: Pointer to &struct xe_sriov_migration_data on success, + * NULL if ring is empty. + */ +struct xe_sriov_migration_data * +xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid); + struct xe_sriov_migration_data *data; + + data = ptr_ring_consume(&migration->ring); + if (data) + wake_up_all(wq); + + return data; +} + +/** + * xe_gt_sriov_pf_migration_restore_produce() - Add VF restore data packet to migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @data: &xe_sriov_migration_data packet + * + * Called by the restore migration data producer (userspace) when processing + * migration data. + * If the ring is full, waits until there is space. + * Queues the restore migration data consumer (PF SR-IOV Control worker), that + * is potentially waiting for data when the ring is empty. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid); + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + int ret; + + xe_gt_assert(gt, data->tile == gt->tile->id); + xe_gt_assert(gt, data->gt == gt->info.id); + + for (;;) { + ret = ptr_ring_produce(&migration->ring, data); + if (!ret) + break; + + ret = wait_event_interruptible(*wq, !ptr_ring_full(&migration->ring)); + if (ret) + return ret; + } + + xe_gt_sriov_pf_control_process_restore_data(gt, vfid); + + return 0; +} + +/** + * xe_gt_sriov_pf_migration_save_consume() - Get VF save data packet from migration ring. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * Called by the save migration data consumer (userspace) when + * processing migration data. + * Queues the save migration data producer (PF SR-IOV Control worker), that is + * potentially waiting to add more data when the ring is full. + * + * Return: Pointer to &struct xe_sriov_migration_data on success, + * NULL if ring is empty and there's no more data available, + * ERR_PTR(-EAGAIN) if the ring is empty, but data is still produced. + */ +struct xe_sriov_migration_data * +xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct xe_sriov_migration_data *data; + + data = ptr_ring_consume(&migration->ring); + if (data) { + xe_gt_sriov_pf_control_process_save_data(gt, vfid); + return data; + } + + if (xe_gt_sriov_pf_control_check_save_data_done(gt, vfid)) + return NULL; + + return ERR_PTR(-EAGAIN); +} + +static void action_ring_cleanup(void *arg) +{ + struct ptr_ring *r = arg; + + ptr_ring_cleanup(r, NULL); +} + /** * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration. 
* @gt: the &xe_gt @@ -393,6 +561,7 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + unsigned int n, totalvfs; int err; xe_gt_assert(gt, IS_SRIOV_PF(xe)); @@ -404,5 +573,19 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) if (err) return err; + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 1; n <= totalvfs; n++) { + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n); + + err = ptr_ring_init(&migration->ring, + XE_GT_SRIOV_PF_MIGRATION_RING_SIZE, GFP_KERNEL); + if (err) + return err; + + err = devm_add_action_or_reset(xe->drm.dev, action_ring_cleanup, &migration->ring); + if (err) + return err; + } + return 0; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 09faeae00ddbb..9e67f18ded205 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -9,11 +9,25 @@ #include struct xe_gt; +struct xe_sriov_migration_data; int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid); + +int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); +struct xe_sriov_migration_data * +xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid); + +int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); +struct xe_sriov_migration_data * +xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid); + #ifdef CONFIG_DEBUG_FS ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, char __user *buf, size_t count, loff_t *pos); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 9d672feac5f04..84be6fac16c8b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -7,6 +7,7 @@ #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ #include +#include #include /** @@ -24,6 +25,16 @@ struct xe_gt_sriov_state_snapshot { } guc; }; +/** + * struct xe_gt_sriov_migration_data - GT-level per-VF migration data. + * + * Used by the PF driver to maintain per-VF migration data. + */ +struct xe_gt_sriov_migration_data { + /** @ring: queue containing VF save / restore migration data */ + struct ptr_ring ring; +}; + /** * struct xe_gt_sriov_pf_migration - GT-level data. * diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index a64a6835ad656..812e74d3f8f80 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -33,6 +33,9 @@ struct xe_gt_sriov_metadata { /** @snapshot: snapshot of the VF state data */ struct xe_gt_sriov_state_snapshot snapshot; + + /** @migration: per-VF migration data. 
*/ + struct xe_gt_sriov_migration_data migration; }; /** diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index 8c523c392f98b..7be9f026d80e8 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -3,8 +3,36 @@ * Copyright © 2025 Intel Corporation */ +#include + +#include "xe_device.h" +#include "xe_gt_sriov_pf_control.h" +#include "xe_gt_sriov_pf_migration.h" +#include "xe_pm.h" #include "xe_sriov.h" +#include "xe_sriov_pf_helpers.h" #include "xe_sriov_pf_migration.h" +#include "xe_sriov_printk.h" + +static struct xe_sriov_pf_migration *pf_pick_migration(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + + return &xe->sriov.pf.vfs[vfid].migration; +} + +/** + * xe_sriov_pf_migration_waitqueue - Get waitqueue for migration. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Return: pointer to the migration waitqueue. + */ +wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid) +{ + return &pf_pick_migration(xe, vfid)->wq; +} /** * xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device @@ -33,9 +61,123 @@ static bool pf_check_migration_support(struct xe_device *xe) */ int xe_sriov_pf_migration_init(struct xe_device *xe) { + unsigned int n, totalvfs; + xe_assert(xe, IS_SRIOV_PF(xe)); xe->sriov.pf.migration.supported = pf_check_migration_support(xe); + if (!xe_sriov_pf_migration_supported(xe)) + return 0; + + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 1; n <= totalvfs; n++) { + struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, n); + + init_waitqueue_head(&migration->wq); + } return 0; } + +static bool pf_migration_data_ready(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + u8 gt_id; + + for_each_gt(gt, xe, gt_id) { + if (!xe_gt_sriov_pf_migration_ring_empty(gt, vfid) || + xe_gt_sriov_pf_control_check_save_data_done(gt, vfid)) + return true; + } + + return false; +} + +static struct xe_sriov_migration_data * +pf_migration_consume(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_data *data; + struct xe_gt *gt; + u8 gt_id; + bool more_data = false; + + for_each_gt(gt, xe, gt_id) { + data = xe_gt_sriov_pf_migration_save_consume(gt, vfid); + if (data && PTR_ERR(data) != EAGAIN) + return data; + if (PTR_ERR(data) == -EAGAIN) + more_data = true; + } + + if (!more_data) + return NULL; + + return ERR_PTR(-EAGAIN); +} + +/** + * xe_sriov_pf_migration_save_consume() - Consume a VF migration data packet from the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Called by the save migration data consumer (userspace) when + * processing migration data. + * If there is no migration data to process, wait until more data is available. + * + * Return: Pointer to &xe_sriov_migration_data on success, + * NULL if ring is empty and no more migration data is expected, + * ERR_PTR value in case of error. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +struct xe_sriov_migration_data * +xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, vfid); + struct xe_sriov_migration_data *data; + int ret; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + for (;;) { + data = pf_migration_consume(xe, vfid); + if (PTR_ERR(data) != -EAGAIN) + break; + + ret = wait_event_interruptible(migration->wq, + pf_migration_data_ready(xe, vfid)); + if (ret) + return ERR_PTR(ret); + } + + return data; +} + +/** + * xe_sriov_pf_migration_restore_produce() - Produce a VF migration data packet to the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * @data: Pointer to &xe_sriov_migration_data + * + * Called by the restore migration data producer (userspace) when processing + * migration data. + * If the underlying data structure is full, wait until there is space. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + struct xe_gt *gt; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + gt = xe_device_get_gt(xe, data->gt); + if (!gt || data->tile != gt->tile->id) { + xe_sriov_err_ratelimited(xe, "VF%d Invalid GT - tile:%u, GT:%u\n", + vfid, data->tile, data->gt); + return -EINVAL; + } + + return xe_gt_sriov_pf_migration_restore_produce(gt, vfid, data); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h index d2b4a24165438..df81a540c246a 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h @@ -7,10 +7,17 @@ #define _XE_SRIOV_PF_MIGRATION_H_ #include +#include struct xe_device; +struct xe_sriov_migration_data; int xe_sriov_pf_migration_init(struct xe_device *xe); bool xe_sriov_pf_migration_supported(struct xe_device *xe); +int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data); +struct xe_sriov_migration_data * +xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid); +wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h index e69de29bb2d1d..2a45ee4e3ece8 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_MIGRATION_TYPES_H_ +#define _XE_SRIOV_PF_MIGRATION_TYPES_H_ + +#include +#include + +/** + * struct xe_sriov_migration_data - Xe SR-IOV VF migration data packet + */ +struct xe_sriov_migration_data { + /** @xe: Xe device */ + struct xe_device *xe; + /** @vaddr: CPU pointer to payload data */ + void *vaddr; + /** @remaining: payload data remaining */ + size_t remaining; + /** @hdr_remaining: header data remaining */ + size_t hdr_remaining; + union { + /** @bo: Buffer object with migration data */ + struct xe_bo *bo; + /** @buff: Buffer with migration data */ + void *buff; + }; + __struct_group(xe_sriov_pf_migration_hdr, hdr, __packed, + /** @hdr.version: migration data protocol version */ + u8 version; + /** @hdr.type: migration data type */ + u8 type; + /** @hdr.tile: migration data tile id */ + u8 tile; + /** @hdr.gt: migration data gt id */ + u8 gt; + /** @hdr.flags: migration data flags */ + u32 
flags; + /** @hdr.offset: offset into the resource; + * used when multiple packets of given type are used for migration + */ + u64 offset; + /** @hdr.size: migration data size */ + u64 size; + ); +}; + +/** + * struct xe_sriov_pf_migration - Per VF device-level migration related data + */ +struct xe_sriov_pf_migration { + /** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */ + wait_queue_head_t wq; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h index 24d22afeececa..c92baaa1694ca 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h @@ -9,6 +9,7 @@ #include #include +#include "xe_sriov_pf_migration_types.h" #include "xe_sriov_pf_provision_types.h" #include "xe_sriov_pf_service_types.h" @@ -18,6 +19,8 @@ struct xe_sriov_metadata { /** @version: negotiated VF/PF ABI version */ struct xe_sriov_pf_service_version version; + /** @migration: migration data */ + struct xe_sriov_pf_migration migration; }; /** -- 2.50.1 Now that it's possible to free the packets - connect the restore handling logic with the ring. The helpers will also be used in upcoming changes that will start producing migration data packets. Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/Makefile | 1 + drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 7 + drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 29 +++- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 1 + drivers/gpu/drm/xe/xe_sriov_migration_data.c | 126 ++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_migration_data.h | 30 +++++ 6 files changed, 193 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/xe/xe_sriov_migration_data.c create mode 100644 drivers/gpu/drm/xe/xe_sriov_migration_data.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 89e5b26c27975..3d72db9e528e4 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -173,6 +173,7 @@ xe-$(CONFIG_PCI_IOV) += \ xe_lmtt_2l.o \ xe_lmtt_ml.o \ xe_pci_sriov.o \ + xe_sriov_migration_data.o \ xe_sriov_pf.o \ xe_sriov_pf_control.o \ xe_sriov_pf_debugfs.o \ diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index ad9dcd3f56453..02019b646389c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -18,6 +18,7 @@ #include "xe_gt_sriov_printk.h" #include "xe_guc_ct.h" #include "xe_sriov.h" +#include "xe_sriov_migration_data.h" #include "xe_sriov_pf_control.h" #include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_service.h" @@ -851,6 +852,8 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_pf_migration_ring_free(gt, vfid); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE); @@ -1048,6 +1051,8 @@ int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid) static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) { + xe_gt_sriov_pf_migration_ring_free(gt, vfid); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA); pf_escape_vf_state(gt, vfid, 
XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE); @@ -1080,6 +1085,8 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type); + xe_sriov_migration_data_free(data); + return 0; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index e61e6f4215864..40b75b5fe9229 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -14,6 +14,7 @@ #include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_sriov.h" +#include "xe_sriov_migration_data.h" #include "xe_sriov_pf_migration.h" #define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5 @@ -418,6 +419,25 @@ bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid) return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring); } +/** + * xe_gt_sriov_pf_migration_ring_free() - Consume and free all data in migration ring + * @gt: the &xe_gt + * @vfid: the VF identifier + */ +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + struct xe_sriov_migration_data *data; + + if (ptr_ring_empty(&migration->ring)) + return; + + xe_gt_sriov_notice(gt, "VF%u unprocessed migration data left in the ring!\n", vfid); + + while ((data = ptr_ring_consume(&migration->ring))) + xe_sriov_migration_data_free(data); +} + /** * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring. * @gt: the &xe_gt @@ -543,11 +563,18 @@ xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid) return ERR_PTR(-EAGAIN); } +static void pf_mig_data_destroy(void *ptr) +{ + struct xe_sriov_migration_data *data = ptr; + + xe_sriov_migration_data_free(data); +} + static void action_ring_cleanup(void *arg) { struct ptr_ring *r = arg; - ptr_ring_cleanup(r, NULL); + ptr_ring_cleanup(r, pf_mig_data_destroy); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 9e67f18ded205..1ed2248f0a17e 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -17,6 +17,7 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid); bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_migration_data.c new file mode 100644 index 0000000000000..2371ca3e6b9a9 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_sriov_migration_data.h" + +static bool data_needs_bo(struct xe_sriov_migration_data *data) +{ + return data->type == XE_SRIOV_MIGRATION_DATA_TYPE_VRAM; +} + +/** + * xe_sriov_migration_data() - Allocate migration data packet + * @xe: the &xe_device + * + * Only allocates the "outer" structure, without initializing the migration + * data backing storage. 
+ * + * Return: Pointer to &xe_sriov_migration_data on success, + * NULL in case of error. + */ +struct xe_sriov_migration_data *xe_sriov_migration_data_alloc(struct xe_device *xe) +{ + struct xe_sriov_migration_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + data->xe = xe; + data->hdr_remaining = sizeof(data->hdr); + + return data; +} + +/** + * xe_sriov_migration_data_free() - Free migration data packet. + * @data: the &xe_sriov_migration_data packet + */ +void xe_sriov_migration_data_free(struct xe_sriov_migration_data *data) +{ + if (data_needs_bo(data)) + xe_bo_unpin_map_no_vm(data->bo); + else + kvfree(data->buff); + + kfree(data); +} + +static int mig_data_init(struct xe_sriov_migration_data *data) +{ + struct xe_gt *gt = xe_device_get_gt(data->xe, data->gt); + + if (data->size == 0) + return 0; + + if (data_needs_bo(data)) { + struct xe_bo *bo; + + bo = xe_bo_create_pin_map_novm(data->xe, gt->tile, PAGE_ALIGN(data->size), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED, false); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + data->bo = bo; + data->vaddr = bo->vmap.vaddr; + } else { + void *buff = kvzalloc(data->size, GFP_KERNEL); + + if (!buff) + return -ENOMEM; + + data->buff = buff; + data->vaddr = buff; + } + + return 0; +} + +#define XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION 1 +/** + * xe_sriov_migration_data_init() - Initialize the migration data header and backing storage. + * @data: the &xe_sriov_migration_data packet + * @tile_id: tile identifier + * @gt_id: GT identifier + * @type: &xe_sriov_migration_data_type + * @offset: offset of data packet payload (within wider resource) + * @size: size of data packet payload + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_migration_data_init(struct xe_sriov_migration_data *data, u8 tile_id, u8 gt_id, + enum xe_sriov_migration_data_type type, loff_t offset, size_t size) +{ + data->version = XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION; + data->type = type; + data->tile = tile_id; + data->gt = gt_id; + data->offset = offset; + data->size = size; + data->remaining = size; + + return mig_data_init(data); +} + +/** + * xe_sriov_migration_data_init() - Initialize the migration data backing storage based on header. + * @data: the &xe_sriov_migration_data packet + * + * Header data is expected to be filled prior to calling this function. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *data) +{ + if (data->version != XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION) + return -EINVAL; + + data->remaining = data->size; + + return mig_data_init(data); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_migration_data.h new file mode 100644 index 0000000000000..3958f58a170f5 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_MIGRATION_DATA_H_ +#define _XE_SRIOV_MIGRATION_DATA_H_ + +#include + +struct xe_device; + +enum xe_sriov_migration_data_type { + /* Skipping 0 to catch uninitialized data */ + XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR = 1, + XE_SRIOV_MIGRATION_DATA_TYPE_TRAILER, + XE_SRIOV_MIGRATION_DATA_TYPE_GGTT, + XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, + XE_SRIOV_MIGRATION_DATA_TYPE_GUC, + XE_SRIOV_MIGRATION_DATA_TYPE_VRAM, +}; + +struct xe_sriov_migration_data *xe_sriov_migration_data_alloc(struct xe_device *xe); +void xe_sriov_migration_data_free(struct xe_sriov_migration_data *snapshot); + +int xe_sriov_migration_data_init(struct xe_sriov_migration_data *data, u8 tile_id, u8 gt_id, + enum xe_sriov_migration_data_type, loff_t offset, size_t size); +int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *snapshot); + +#endif -- 2.50.1 Add debugfs handlers for migration state and handle bitstream .read()/.write() to convert from bitstream to/from migration data packets. As descriptor/trailer are handled at this layer - add handling for both save and restore side. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_sriov_migration_data.c | 337 ++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_migration_data.h | 5 + drivers/gpu/drm/xe/xe_sriov_pf_control.c | 5 + drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c | 35 ++ drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 54 +++ .../gpu/drm/xe/xe_sriov_pf_migration_types.h | 9 + 6 files changed, 445 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_migration_data.c index 2371ca3e6b9a9..a3f50836adc81 100644 --- a/drivers/gpu/drm/xe/xe_sriov_migration_data.c +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.c @@ -6,6 +6,45 @@ #include "xe_bo.h" #include "xe_device.h" #include "xe_sriov_migration_data.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" +#include "xe_sriov_printk.h" + +static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + + return &xe->sriov.pf.vfs[vfid].migration.lock; +} + +static struct xe_sriov_migration_data **pf_pick_pending(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.pending; +} + +static struct xe_sriov_migration_data ** +pf_pick_descriptor(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.descriptor; +} + +static struct xe_sriov_migration_data **pf_pick_trailer(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); + 
lockdep_assert_held(pf_migration_mutex(xe, vfid)); + + return &xe->sriov.pf.vfs[vfid].migration.trailer; +} static bool data_needs_bo(struct xe_sriov_migration_data *data) { @@ -42,6 +81,9 @@ struct xe_sriov_migration_data *xe_sriov_migration_data_alloc(struct xe_device * */ void xe_sriov_migration_data_free(struct xe_sriov_migration_data *data) { + if (IS_ERR_OR_NULL(data)) + return; + if (data_needs_bo(data)) xe_bo_unpin_map_no_vm(data->bo); else @@ -124,3 +166,298 @@ int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *data) return mig_data_init(data); } + +static ssize_t vf_mig_data_hdr_read(struct xe_sriov_migration_data *data, + char __user *buf, size_t len) +{ + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; + + if (!data->hdr_remaining) + return -EINVAL; + + if (len > data->hdr_remaining) + len = data->hdr_remaining; + + if (copy_to_user(buf, (void *)&data->hdr + offset, len)) + return -EFAULT; + + data->hdr_remaining -= len; + + return len; +} + +static ssize_t vf_mig_data_read(struct xe_sriov_migration_data *data, + char __user *buf, size_t len) +{ + if (len > data->remaining) + len = data->remaining; + + if (copy_to_user(buf, data->vaddr + (data->size - data->remaining), len)) + return -EFAULT; + + data->remaining -= len; + + return len; +} + +static ssize_t __vf_mig_data_read_single(struct xe_sriov_migration_data **data, + unsigned int vfid, char __user *buf, size_t len) +{ + ssize_t copied = 0; + + if ((*data)->hdr_remaining) + copied = vf_mig_data_hdr_read(*data, buf, len); + else + copied = vf_mig_data_read(*data, buf, len); + + if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) { + xe_sriov_migration_data_free(*data); + *data = NULL; + } + + return copied; +} + +static struct xe_sriov_migration_data **vf_mig_pick_data(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_data **data; + + data = pf_pick_descriptor(xe, vfid); + if (*data) + return data; + + data = pf_pick_pending(xe, vfid); + if (!*data) + *data = xe_sriov_pf_migration_save_consume(xe, vfid); + if (*data) + return data; + + data = pf_pick_trailer(xe, vfid); + if (*data) + return data; + + return ERR_PTR(-ENODATA); +} + +static ssize_t vf_mig_data_read_single(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len) +{ + struct xe_sriov_migration_data **data = vf_mig_pick_data(xe, vfid); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return __vf_mig_data_read_single(data, vfid, buf, len); +} + +/** + * xe_sriov_migration_data_read() - Read migration data from the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested read size from userspace + * + * Return: number of bytes that has been successfully read, + * 0 if no more migration data is available, + * -errno on failure. 
+ */ +ssize_t xe_sriov_migration_data_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len) +{ + ssize_t ret, consumed = 0; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) { + while (consumed < len) { + ret = vf_mig_data_read_single(xe, vfid, buf, len - consumed); + if (ret == -ENODATA) + break; + if (ret < 0) + return ret; + + consumed += ret; + buf += ret; + } + } + + return consumed; +} + +static ssize_t vf_mig_hdr_write(struct xe_sriov_migration_data *data, + const char __user *buf, size_t len) +{ + loff_t offset = sizeof(data->hdr) - data->hdr_remaining; + int ret; + + if (len > data->hdr_remaining) + len = data->hdr_remaining; + + if (copy_from_user((void *)&data->hdr + offset, buf, len)) + return -EFAULT; + + data->hdr_remaining -= len; + + if (!data->hdr_remaining) { + ret = xe_sriov_migration_data_init_from_hdr(data); + if (ret) + return ret; + } + + return len; +} + +static ssize_t vf_mig_data_write(struct xe_sriov_migration_data *data, + const char __user *buf, size_t len) +{ + if (len > data->remaining) + len = data->remaining; + + if (copy_from_user(data->vaddr + (data->size - data->remaining), buf, len)) + return -EFAULT; + + data->remaining -= len; + + return len; +} + +static ssize_t vf_mig_data_write_single(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len) +{ + struct xe_sriov_migration_data **data = pf_pick_pending(xe, vfid); + int ret; + ssize_t copied; + + if (IS_ERR_OR_NULL(*data)) { + *data = xe_sriov_migration_data_alloc(xe); + if (!*data) + return -ENOMEM; + } + + if ((*data)->hdr_remaining) + copied = vf_mig_hdr_write(*data, buf, len); + else + copied = vf_mig_data_write(*data, buf, len); + + if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) { + ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data); + if (ret) { + xe_sriov_migration_data_free(*data); + return ret; + } + + *data = NULL; + } + + return copied; +} + +/** + * xe_sriov_migration_data_write() - Write migration data to the device. + * @xe: the &xe_device + * @vfid: the VF identifier + * @buf: start address of userspace buffer + * @len: requested write size from userspace + * + * Return: number of bytes that has been successfully written, + * -errno on failure. 
+ */ +ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len) +{ + ssize_t ret, produced = 0; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) { + while (produced < len) { + ret = vf_mig_data_write_single(xe, vfid, buf, len - produced); + if (ret < 0) + return ret; + + produced += ret; + buf += ret; + } + } + + return produced; +} + +#define MIGRATION_DESCRIPTOR_DWORDS 0 +static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_data **desc = pf_pick_descriptor(xe, vfid); + struct xe_sriov_migration_data *data; + int ret; + + data = xe_sriov_migration_data_alloc(xe); + if (!data) + return -ENOMEM; + + ret = xe_sriov_migration_data_init(data, 0, 0, XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR, + 0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32)); + if (ret) { + xe_sriov_migration_data_free(data); + return ret; + } + + *desc = data; + + return 0; +} + +static void pf_pending_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_data **data = pf_pick_pending(xe, vfid); + + *data = NULL; +} + +#define MIGRATION_TRAILER_SIZE 0 +static int pf_trailer_init(struct xe_device *xe, unsigned int vfid) +{ + struct xe_sriov_migration_data **trailer = pf_pick_trailer(xe, vfid); + struct xe_sriov_migration_data *data; + int ret; + + data = xe_sriov_migration_data_alloc(xe); + if (!data) + return -ENOMEM; + + ret = xe_sriov_migration_data_init(data, 0, 0, XE_SRIOV_MIGRATION_DATA_TYPE_TRAILER, + 0, MIGRATION_TRAILER_SIZE); + if (ret) { + xe_sriov_migration_data_free(data); + return ret; + } + + *trailer = data; + + return 0; +} + +/** + * xe_sriov_migration_data_save_init() - Initialize the pending save migration data. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Return: 0 on success, -errno on failure. 
+ */ +int xe_sriov_migration_data_save_init(struct xe_device *xe, unsigned int vfid) +{ + int ret; + + scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) { + ret = pf_descriptor_init(xe, vfid); + if (ret) + return ret; + + ret = pf_trailer_init(xe, vfid); + if (ret) + return ret; + + pf_pending_init(xe, vfid); + } + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_migration_data.h index 3958f58a170f5..7ec489c3f28d2 100644 --- a/drivers/gpu/drm/xe/xe_sriov_migration_data.h +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.h @@ -26,5 +26,10 @@ void xe_sriov_migration_data_free(struct xe_sriov_migration_data *snapshot); int xe_sriov_migration_data_init(struct xe_sriov_migration_data *data, u8 tile_id, u8 gt_id, enum xe_sriov_migration_data_type, loff_t offset, size_t size); int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *snapshot); +ssize_t xe_sriov_migration_data_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len); +ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len); +int xe_sriov_migration_data_save_init(struct xe_device *xe, unsigned int vfid); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c index 8d8a01faf5291..c2768848daba1 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -5,6 +5,7 @@ #include "xe_device.h" #include "xe_gt_sriov_pf_control.h" +#include "xe_sriov_migration_data.h" #include "xe_sriov_pf_control.h" #include "xe_sriov_printk.h" @@ -165,6 +166,10 @@ int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid) unsigned int id; int ret; + ret = xe_sriov_migration_data_save_init(xe, vfid); + if (ret) + return ret; + for_each_gt(gt, xe, id) { ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid); if (ret) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c index e0e6340c49106..a9a28aec22421 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -9,6 +9,7 @@ #include "xe_device.h" #include "xe_device_types.h" #include "xe_pm.h" +#include "xe_sriov_migration_data.h" #include "xe_sriov_pf.h" #include "xe_sriov_pf_control.h" #include "xe_sriov_pf_debugfs.h" @@ -132,6 +133,7 @@ static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent) * /sys/kernel/debug/dri/BDF/ * ├── sriov * │ ├── vf1 + * │ │ ├── migration_data * │ │ ├── pause * │ │ ├── reset * │ │ ├── resume @@ -220,6 +222,38 @@ DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf); DEFINE_VF_CONTROL_ATTRIBUTE_RW(save_vf); DEFINE_VF_CONTROL_ATTRIBUTE_RW(restore_vf); +static ssize_t data_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + + if (*pos) + return -ESPIPE; + + return xe_sriov_migration_data_write(xe, vfid, buf, count); +} + +static ssize_t data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + + if (*ppos) + return -ESPIPE; + + return xe_sriov_migration_data_read(xe, vfid, buf, count); +} + +static const struct file_operations data_vf_fops = { + .owner = THIS_MODULE, + .open = 
simple_open, + .write = data_write, + .read = data_read, + .llseek = default_llseek, +}; + static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) { debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops); @@ -228,6 +262,7 @@ static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops); debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops); debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops); + debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops); } static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index 7be9f026d80e8..8ea531d36f53b 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -10,6 +10,7 @@ #include "xe_gt_sriov_pf_migration.h" #include "xe_pm.h" #include "xe_sriov.h" +#include "xe_sriov_migration_data.h" #include "xe_sriov_pf_helpers.h" #include "xe_sriov_pf_migration.h" #include "xe_sriov_printk.h" @@ -53,6 +54,15 @@ static bool pf_check_migration_support(struct xe_device *xe) return IS_ENABLED(CONFIG_DRM_XE_DEBUG); } +static void pf_migration_cleanup(void *arg) +{ + struct xe_sriov_pf_migration *migration = arg; + + xe_sriov_migration_data_free(migration->pending); + xe_sriov_migration_data_free(migration->trailer); + xe_sriov_migration_data_free(migration->descriptor); +} + /** * xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration. * @xe: the &xe_device @@ -62,6 +72,7 @@ static bool pf_check_migration_support(struct xe_device *xe) int xe_sriov_pf_migration_init(struct xe_device *xe) { unsigned int n, totalvfs; + int err; xe_assert(xe, IS_SRIOV_PF(xe)); @@ -73,7 +84,15 @@ int xe_sriov_pf_migration_init(struct xe_device *xe) for (n = 1; n <= totalvfs; n++) { struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, n); + err = devm_mutex_init(xe->drm.dev, &migration->lock); + if (err) + return err; + init_waitqueue_head(&migration->wq); + + err = devm_add_action_or_reset(xe->drm.dev, pf_migration_cleanup, migration); + if (err) + return err; } return 0; @@ -153,6 +172,36 @@ xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid) return data; } +static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + if (data->tile != 0 || data->gt != 0) + return -EINVAL; + + xe_sriov_migration_data_free(data); + + return 0; +} + +static int pf_handle_trailer(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + struct xe_gt *gt; + u8 gt_id; + + if (data->tile != 0 || data->gt != 0) + return -EINVAL; + if (data->offset != 0 || data->size != 0 || data->buff || data->bo) + return -EINVAL; + + xe_sriov_migration_data_free(data); + + for_each_gt(gt, xe, gt_id) + xe_gt_sriov_pf_control_restore_data_done(gt, vfid); + + return 0; +} + /** * xe_sriov_pf_migration_restore_produce() - Produce a VF migration data packet to the device. 
* @xe: the &xe_device @@ -172,6 +221,11 @@ int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfi xe_assert(xe, IS_SRIOV_PF(xe)); + if (data->type == XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR) + return pf_handle_descriptor(xe, vfid, data); + if (data->type == XE_SRIOV_MIGRATION_DATA_TYPE_TRAILER) + return pf_handle_trailer(xe, vfid, data); + gt = xe_device_get_gt(xe, data->gt); if (!gt || data->tile != gt->tile->id) { xe_sriov_err_ratelimited(xe, "VF%d Invalid GT - tile:%u, GT:%u\n", diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h index 2a45ee4e3ece8..8468e5eeb6d66 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h @@ -7,6 +7,7 @@ #define _XE_SRIOV_PF_MIGRATION_TYPES_H_ #include +#include #include /** @@ -53,6 +54,14 @@ struct xe_sriov_migration_data { struct xe_sriov_pf_migration { /** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */ wait_queue_head_t wq; + /** @lock: Mutex protecting the migration data */ + struct mutex lock; + /** @pending: currently processed data packet of VF resource */ + struct xe_sriov_migration_data *pending; + /** @trailer: data packet used to indicate the end of stream */ + struct xe_sriov_migration_data *trailer; + /** @descriptor: data packet containing the metadata describing the device */ + struct xe_sriov_migration_data *descriptor; }; #endif -- 2.50.1 The descriptor reuses the KLV format used by GuC and contains metadata that can be used to quickly fail migration when source is incompatible with destination. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_sriov_migration_data.c | 89 +++++++++++++++++++- drivers/gpu/drm/xe/xe_sriov_migration_data.h | 2 + drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 6 ++ 3 files changed, 96 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_migration_data.c index a3f50836adc81..18e17706772fc 100644 --- a/drivers/gpu/drm/xe/xe_sriov_migration_data.c +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.c @@ -5,6 +5,7 @@ #include "xe_bo.h" #include "xe_device.h" +#include "xe_guc_klv_helpers.h" #include "xe_sriov_migration_data.h" #include "xe_sriov_pf_helpers.h" #include "xe_sriov_pf_migration.h" @@ -383,11 +384,19 @@ ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid, return produced; } -#define MIGRATION_DESCRIPTOR_DWORDS 0 +#define MIGRATION_KLV_DEVICE_DEVID_KEY 0xf001u +#define MIGRATION_KLV_DEVICE_DEVID_LEN 1u +#define MIGRATION_KLV_DEVICE_REVID_KEY 0xf002u +#define MIGRATION_KLV_DEVICE_REVID_LEN 1u + +#define MIGRATION_DESCRIPTOR_DWORDS (GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_DEVID_LEN + \ + GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_REVID_LEN) static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid) { struct xe_sriov_migration_data **desc = pf_pick_descriptor(xe, vfid); struct xe_sriov_migration_data *data; + unsigned int len = 0; + u32 *klvs; int ret; data = xe_sriov_migration_data_alloc(xe); @@ -401,11 +410,89 @@ static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid) return ret; } + klvs = data->vaddr; + klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_DEVID_KEY, + MIGRATION_KLV_DEVICE_DEVID_LEN); + klvs[len++] = xe->info.devid; + klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_REVID_KEY, + MIGRATION_KLV_DEVICE_REVID_LEN); + klvs[len++] = xe->info.revid; + + xe_assert(xe, len == 
MIGRATION_DESCRIPTOR_DWORDS); + *desc = data; return 0; } +/** + * xe_sriov_migration_data_process_descriptor() - Process migration data descriptor. + * @xe: the &xe_device + * @vfid: the VF identifier + * @data: the &struct xe_sriov_pf_migration_data containing the descriptor + * + * The descriptor uses the same KLV format as GuC, and contains metadata used for + * checking migration data compatibility. + * + * Return: 0 on success, -errno on failure. + */ +int xe_sriov_migration_data_process_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + u32 num_dwords = data->size / sizeof(u32); + u32 *klvs = data->vaddr; + + xe_assert(xe, data->type == XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR); + + if (data->size % sizeof(u32)) { + xe_sriov_warn(xe, "Aborting migration, descriptor not in KLV format (size=%llu)\n", + data->size); + return -EINVAL; + } + + while (num_dwords >= GUC_KLV_LEN_MIN) { + u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]); + u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]); + + klvs += GUC_KLV_LEN_MIN; + num_dwords -= GUC_KLV_LEN_MIN; + + if (len > num_dwords) + return -EINVAL; + + switch (key) { + case MIGRATION_KLV_DEVICE_DEVID_KEY: + if (*klvs != xe->info.devid) { + xe_sriov_warn(xe, + "Aborting migration, devid mismatch %#06x!=%#06x\n", + *klvs, xe->info.devid); + return -ENODEV; + } + break; + case MIGRATION_KLV_DEVICE_REVID_KEY: + if (*klvs != xe->info.revid) { + xe_sriov_warn(xe, + "Aborting migration, revid mismatch %#06x!=%#06x\n", + *klvs, xe->info.revid); + return -ENODEV; + } + break; + default: + xe_sriov_dbg(xe, + "Skipping unknown migration descriptor key %#06x (len=%#06x)\n", + key, len); + print_hex_dump_bytes("desc: ", DUMP_PREFIX_OFFSET, klvs, + min(SZ_64, len * sizeof(u32))); + break; + } + + klvs += len; + num_dwords -= len; + } + + return 0; +} + static void pf_pending_init(struct xe_device *xe, unsigned int vfid) { struct xe_sriov_migration_data **data = pf_pick_pending(xe, vfid); diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_migration_data.h index 7ec489c3f28d2..bb4ea5850e5c0 100644 --- a/drivers/gpu/drm/xe/xe_sriov_migration_data.h +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.h @@ -30,6 +30,8 @@ ssize_t xe_sriov_migration_data_read(struct xe_device *xe, unsigned int vfid, char __user *buf, size_t len); ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid, const char __user *buf, size_t len); +int xe_sriov_migration_data_process_descriptor(struct xe_device *xe, unsigned int vfid, + struct xe_sriov_migration_data *data); int xe_sriov_migration_data_save_init(struct xe_device *xe, unsigned int vfid); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index 8ea531d36f53b..f0a0c2b027a20 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -175,9 +175,15 @@ xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid) static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid, struct xe_sriov_migration_data *data) { + int ret; + if (data->tile != 0 || data->gt != 0) return -EINVAL; + ret = xe_sriov_migration_data_process_descriptor(xe, vfid, data); + if (ret) + return ret; + xe_sriov_migration_data_free(data); return 0; -- 2.50.1 The size is normally used to make a decision on when to stop the device (mainly when it's in a pre_copy state). 
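For illustration, a rough userspace sketch of how this size could drive the pre-copy stop decision, assuming the per-VF debugfs layout added in this series (/sys/kernel/debug/dri/BDF/sriov/vfN/migration_size); the directory handling and the stop threshold are illustrative only and not part of the series:

/*
 * Rough userspace sketch (not part of this series): poll the per-VF
 * migration_size debugfs file to decide when to stop the VF during
 * pre-copy. The stop threshold below is an example value.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define EXAMPLE_STOP_THRESHOLD	(4ull << 20)	/* stop once under ~4M remains */

static long long vf_migration_size(const char *vf_dir)
{
	char path[256], buf[32] = "";
	long long size = -1;
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/migration_size", vf_dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		size = atoll(buf);	/* size_read() prints a decimal byte count */
	}
	close(fd);

	return size;
}

static int vf_precopy_should_stop(const char *vf_dir)
{
	long long size = vf_migration_size(vf_dir);

	return size >= 0 && size < EXAMPLE_STOP_THRESHOLD;
}

A management stack would keep polling while the VF is live and only trigger save (and start draining migration_data) once the remaining size fits its downtime budget.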
Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 21 +++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 2 ++ drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c | 29 ++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 30 +++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_pf_migration.h | 1 + 5 files changed, 83 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 40b75b5fe9229..30f0e51da49ae 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -395,6 +395,27 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int } #endif /* CONFIG_DEBUG_FS */ +/** + * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: total migration data size in bytes or a negative error code on failure. + */ +ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) +{ + ssize_t total = 0; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + /* Nothing to query yet - will be updated once per-GT migration data types are added */ + return total; +} + /** * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty. * @gt: the &xe_gt diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 1ed2248f0a17e..e2d41750f863c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -15,6 +15,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); +ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); + bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid); bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c index a9a28aec22421..bc2d0b0342f22 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -14,6 +14,7 @@ #include "xe_sriov_pf_control.h" #include "xe_sriov_pf_debugfs.h" #include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" #include "xe_sriov_pf_provision.h" #include "xe_sriov_pf_service.h" #include "xe_sriov_printk.h" @@ -254,6 +255,33 @@ static const struct file_operations data_vf_fops = { .llseek = default_llseek, }; +static ssize_t size_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + struct dentry *dent = file_dentry(file)->d_parent; + struct xe_device *xe = extract_xe(dent); + unsigned int vfid = extract_vfid(dent); + char buf[21]; + ssize_t ret; + int len; + + xe_pm_runtime_get(xe); + ret = xe_sriov_pf_migration_size(xe, vfid); + xe_pm_runtime_put(xe); + if (ret < 0) + return ret; + + len = scnprintf(buf, sizeof(buf), "%zd\n", ret); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations size_vf_fops = { + .owner = THIS_MODULE, + .open = 
simple_open, + .read = size_read, + .llseek = default_llseek, +}; + static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) { debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops); @@ -263,6 +291,7 @@ static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops); debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops); debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops); + debugfs_create_file("migration_size", 0400, vfdent, xe, &size_vf_fops); } static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index f0a0c2b027a20..6992c227e5a44 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -241,3 +241,33 @@ int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfi return xe_gt_sriov_pf_migration_restore_produce(gt, vfid, data); } + +/** + * xe_sriov_pf_migration_size() - Total size of migration data from all components within a device + * @xe: the &xe_device + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: total migration data size in bytes or a negative error code on failure. + */ +ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid) +{ + size_t size = 0; + struct xe_gt *gt; + ssize_t ret; + u8 gt_id; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, vfid); + + for_each_gt(gt, xe, gt_id) { + ret = xe_gt_sriov_pf_migration_size(gt, vfid); + if (ret < 0) + return ret; + + size += ret; + } + + return size; +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h index df81a540c246a..16cb444c36aa6 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h @@ -18,6 +18,7 @@ int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfi struct xe_sriov_migration_data *data); struct xe_sriov_migration_data * xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid); +ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid); wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid); #endif -- 2.50.1 In upcoming changes the cached buffers are going to be used to read data produced by the GuC. Add a counterpart to flush, which synchronizes the CPU-side of suballocation with the GPU data and propagate the interface to GuC Buffer Cache. Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_guc_buf.c | 13 +++++++++++++ drivers/gpu/drm/xe/xe_guc_buf.h | 1 + drivers/gpu/drm/xe/xe_sa.c | 21 +++++++++++++++++++++ drivers/gpu/drm/xe/xe_sa.h | 1 + 4 files changed, 36 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c index 502ca3a4ee606..4d8a4712309f4 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.c +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -115,6 +115,19 @@ void xe_guc_buf_release(const struct xe_guc_buf buf) xe_sa_bo_free(buf.sa, NULL); } +/** + * xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation. + * @buf: the &xe_guc_buf to sync + * + * Return: a CPU pointer of the sub-allocation. 
+ */ +void *xe_guc_buf_sync_read(const struct xe_guc_buf buf) +{ + xe_sa_bo_sync_read(buf.sa); + + return xe_sa_bo_cpu_addr(buf.sa); +} + /** * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory. * @buf: the &xe_guc_buf to flush diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h index 0d67604d96bdd..c5e0f1fd24d74 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.h +++ b/drivers/gpu/drm/xe/xe_guc_buf.h @@ -30,6 +30,7 @@ static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf) } void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf); +void *xe_guc_buf_sync_read(const struct xe_guc_buf buf); u64 xe_guc_buf_flush(const struct xe_guc_buf buf); u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf); u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size); diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index fedd017d6dd36..63a5263dcf1b1 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -110,6 +110,10 @@ struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0); } +/** + * xe_sa_bo_flush_write() - Copy the data from the sub-allocation to the GPU memory. + * @sa_bo: the &drm_suballoc to flush + */ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo) { struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager); @@ -123,6 +127,23 @@ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo) drm_suballoc_size(sa_bo)); } +/** + * xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation. + * @sa_bo: the &drm_suballoc to sync + */ +void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo) +{ + struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager); + struct xe_device *xe = tile_to_xe(sa_manager->bo->tile); + + if (!sa_manager->bo->vmap.is_iomem) + return; + + xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap, + drm_suballoc_soffset(sa_bo), + drm_suballoc_size(sa_bo)); +} + void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence) { diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h index 99dbf0eea5402..1be7443508361 100644 --- a/drivers/gpu/drm/xe/xe_sa.h +++ b/drivers/gpu/drm/xe/xe_sa.h @@ -37,6 +37,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager } void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo); +void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo); void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence); static inline struct xe_sa_manager * -- 2.50.1 An upcoming change will use GuC buffer cache as a place where GuC migration data will be stored, and the memory requirement for that is larger than indirect data. Allow the caller to pass the size based on the intended usecase. 
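For illustration, a minimal sketch of the intended call pattern, assuming the caller knows its largest expected object up front (the actual migration-data user lands later in this series); guc_send_action_using() and both example_* helpers are made-up placeholders, not real Xe/GuC functions:

/*
 * Minimal sketch only: size the cache for the largest expected object,
 * then round-trip data through a reserved buffer with flush/sync_read.
 * guc_send_action_using() is a placeholder for a real H2G action that
 * takes a GGTT address and a dword count.
 */
static int example_buf_cache_setup(struct xe_guc *guc, u32 largest_object_size)
{
	/* Requests smaller than the default cache size fall back to the default. */
	return xe_guc_buf_cache_init_with_size(&guc->buf, largest_object_size);
}

static int example_buf_roundtrip(struct xe_guc *guc, void *dst, size_t size)
{
	const u32 ndwords = size / sizeof(u32);
	CLASS(xe_guc_buf, buf)(&guc->buf, ndwords);
	int ret;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	memset(xe_guc_buf_cpu_ptr(buf), 0, size);

	/* xe_guc_buf_flush() copies the CPU copy to GPU memory and returns the GGTT address */
	ret = guc_send_action_using(guc, xe_guc_buf_flush(buf), ndwords);
	if (ret < 0)
		return ret;

	/* xe_guc_buf_sync_read() pulls the GuC-written data back before the CPU reads it */
	memcpy(dst, xe_guc_buf_sync_read(buf), size);

	return 0;
}

The new sync_read step matters only for iomem-backed sub-allocations; for system-memory backing it is a no-op, which is why the helper can be called unconditionally.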
Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_guc_buf.c | 44 ++++++++++++++++++++++++--------- drivers/gpu/drm/xe/xe_guc_buf.h | 1 + 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c index 4d8a4712309f4..3ce4425001303 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.c +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -13,6 +13,8 @@ #include "xe_guc_buf.h" #include "xe_sa.h" +#define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K + static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache) { return container_of(cache, struct xe_guc, buf); @@ -23,21 +25,12 @@ static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache) return guc_to_gt(cache_to_guc(cache)); } -/** - * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. - * @cache: the &xe_guc_buf_cache to initialize - * - * The Buffer Cache allows to obtain a reusable buffer that can be used to pass - * indirect H2G data to GuC without a need to create a ad-hoc allocation. - * - * Return: 0 on success or a negative error code on failure. - */ -int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) +static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size) { struct xe_gt *gt = cache_to_gt(cache); struct xe_sa_manager *sam; - sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32)); + sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32)); if (IS_ERR(sam)) return PTR_ERR(sam); cache->sam = sam; @@ -48,6 +41,35 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) return 0; } +/** + * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. + * @cache: the &xe_guc_buf_cache to initialize + * + * The Buffer Cache allows to obtain a reusable buffer that can be used to pass + * data to GuC or read data from GuC without a need to create a ad-hoc allocation. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) +{ + return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE); +} + +/** + * xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache. + * @cache: the &xe_guc_buf_cache to initialize + * @size: size in bytes + * + * Like xe_guc_buf_cache_init(), except it allows the caller to make the cache + * buffer larger, allowing to accommodate larger objects. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size) +{ + return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size)); +} + /** * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports. * @cache: the &xe_guc_buf_cache to query diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h index c5e0f1fd24d74..e3cca553fb004 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.h +++ b/drivers/gpu/drm/xe/xe_guc_buf.h @@ -12,6 +12,7 @@ #include "xe_guc_buf_types.h" int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache); +int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size); u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache); struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords); struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, -- 2.50.1 Contiguous PF GGTT VMAs can be scarce after creating VFs. 
Increase the GuC buffer cache size to 8M for PF so that we can fit GuC migration data (which currently maxes out at just under 4M) and use the cache instead of allocating fresh BOs. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 46 ++++++------------- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 3 ++ drivers/gpu/drm/xe/xe_guc.c | 12 ++++- 3 files changed, 28 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 30f0e51da49ae..a2db127982638 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -11,7 +11,7 @@ #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_printk.h" -#include "xe_guc.h" +#include "xe_guc_buf.h" #include "xe_guc_ct.h" #include "xe_sriov.h" #include "xe_sriov_migration_data.h" @@ -57,73 +57,55 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) /* Return: number of state dwords saved or a negative error code on failure */ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, - void *buff, size_t size) + void *dst, size_t size) { const int ndwords = size / sizeof(u32); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; - struct xe_bo *bo; + CLASS(xe_guc_buf, buf)(&guc->buf, ndwords); int ret; xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map_novm(xe, tile, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE, false); - if (IS_ERR(bo)) - return PTR_ERR(bo); + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + memset(xe_guc_buf_cpu_ptr(buf), 0, size); ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, - xe_bo_ggtt_addr(bo), ndwords); + xe_guc_buf_flush(buf), ndwords); if (!ret) ret = -ENODATA; else if (ret > ndwords) ret = -EPROTO; else if (ret > 0) - xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32)); + memcpy(dst, xe_guc_buf_sync_read(buf), ret * sizeof(u32)); - xe_bo_unpin_map_no_vm(bo); return ret; } /* Return: number of state dwords restored or a negative error code on failure */ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, - const void *buff, size_t size) + const void *src, size_t size) { const int ndwords = size / sizeof(u32); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; - struct xe_bo *bo; + CLASS(xe_guc_buf_from_data, buf)(&guc->buf, src, size); int ret; xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map_novm(xe, tile, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE, false); - if (IS_ERR(bo)) - return PTR_ERR(bo); - - xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size); + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, - xe_bo_ggtt_addr(bo), ndwords); + xe_guc_buf_flush(buf), ndwords); if (!ret) ret = -ENODATA; else if (ret > ndwords) ret = -EPROTO; - xe_bo_unpin_map_no_vm(bo); return ret; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index e2d41750f863c..4f2f2783339c3 100644 --- 
a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -11,6 +11,9 @@ struct xe_gt; struct xe_sriov_migration_data; +/* TODO: get this information by querying GuC in the future */ +#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M + int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index ecc3e091b89e6..badae9a26220e 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -24,6 +24,7 @@ #include "xe_gt_printk.h" #include "xe_gt_sriov_vf.h" #include "xe_gt_throttle.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_guc_ads.h" #include "xe_guc_buf.h" #include "xe_guc_capture.h" @@ -40,6 +41,7 @@ #include "xe_mmio.h" #include "xe_platform_types.h" #include "xe_sriov.h" +#include "xe_sriov_pf_migration.h" #include "xe_uc.h" #include "xe_uc_fw.h" #include "xe_wa.h" @@ -821,6 +823,14 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc) return 0; } +static u32 guc_additional_cache_size(struct xe_device *xe) +{ + if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe)) + return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE; + else + return 0; /* Fallback to default size */ +} + /** * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load * @guc: The GuC object @@ -860,7 +870,7 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc) if (ret) return ret; - ret = xe_guc_buf_cache_init(&guc->buf); + ret = xe_guc_buf_cache_init_with_size(&guc->buf, guc_additional_cache_size(guc_to_xe(guc))); if (ret) return ret; -- 2.50.1 In upcoming changes, SR-IOV VF migration data will be extended beyond GuC data and exported to userspace using VFIO interface (with a vendor-specific variant driver) and a device-level debugfs interface. Remove the GT-level debugfs. 
Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c | 47 --------------------- 1 file changed, 47 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 838beb7f6327f..5278ea4fd6552 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -327,9 +327,6 @@ static const struct { { "stop", xe_gt_sriov_pf_control_stop_vf }, { "pause", xe_gt_sriov_pf_control_pause_vf }, { "resume", xe_gt_sriov_pf_control_resume_vf }, -#ifdef CONFIG_DRM_XE_DEBUG_SRIOV - { "restore!", xe_gt_sriov_pf_migration_restore_guc_state }, -#endif }; static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) @@ -393,47 +390,6 @@ static const struct file_operations control_ops = { .llseek = default_llseek, }; -/* - * /sys/kernel/debug/dri/BDF/ - * ├── sriov - * : ├── vf1 - * : ├── tile0 - * : ├── gt0 - * : ├── guc_state - */ - -static ssize_t guc_state_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - struct dentry *dent = file_dentry(file); - struct dentry *parent = dent->d_parent; - struct xe_gt *gt = extract_gt(parent); - unsigned int vfid = extract_vfid(parent); - - return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos); -} - -static ssize_t guc_state_write(struct file *file, const char __user *buf, - size_t count, loff_t *pos) -{ - struct dentry *dent = file_dentry(file); - struct dentry *parent = dent->d_parent; - struct xe_gt *gt = extract_gt(parent); - unsigned int vfid = extract_vfid(parent); - - if (*pos) - return -EINVAL; - - return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count); -} - -static const struct file_operations guc_state_ops = { - .owner = THIS_MODULE, - .read = guc_state_read, - .write = guc_state_write, - .llseek = default_llseek, -}; - /* * /sys/kernel/debug/dri/BDF/ * ├── sriov @@ -568,9 +524,6 @@ static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int v /* for testing/debugging purposes only! */ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { - debugfs_create_file("guc_state", - IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, - dent, NULL, &guc_state_ops); debugfs_create_file("config_blob", IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, dent, NULL, &config_blob_ops); -- 2.50.1 In upcoming changes, the GuC VF migration data will be handled as part of separate SAVE/RESTORE states in VF control state machine. Remove it from PAUSE state. 
Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 39 +------------------ .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 2 - 2 files changed, 2 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 02019b646389c..203ba1fbeefcd 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -183,7 +183,6 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_SEND_PAUSE); CASE2STR(PAUSE_WAIT_GUC); CASE2STR(PAUSE_GUC_DONE); - CASE2STR(PAUSE_SAVE_GUC); CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); CASE2STR(SAVE_WIP); @@ -453,8 +452,7 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) * : PAUSE_GUC_DONE o-----restart * : | : * : | o---<--busy : - * : v / / : - * : PAUSE_SAVE_GUC : + * : / : * : / : * : / : * :....o..............o...............o...........: @@ -474,7 +472,6 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE); - pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC); } } @@ -505,41 +502,12 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid) pf_enter_vf_pause_failed(gt, vfid); } -static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) -{ - if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) - pf_enter_vf_state_machine_bug(gt, vfid); -} - -static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) -{ - int err; - - if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) - return false; - - err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid); - if (err) { - /* retry if busy */ - if (err == -EBUSY) { - pf_enter_vf_pause_save_guc(gt, vfid); - return true; - } - /* give up on error */ - if (err == -EIO) - pf_enter_vf_mismatch(gt, vfid); - } - - pf_enter_vf_pause_completed(gt, vfid); - return true; -} - static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) { if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) return false; - pf_enter_vf_pause_save_guc(gt, vfid); + pf_enter_vf_pause_completed(gt, vfid); return true; } @@ -1942,9 +1910,6 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_guc_done(gt, vfid)) return true; - if (pf_exit_vf_pause_save_guc(gt, vfid)) - return true; - if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) { xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, control_bit_to_string(XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index a14c738581ae2..c2ec0403b2be7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -28,7 +28,6 @@ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command. * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC. * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC. - * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state. 
* @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress. @@ -71,7 +70,6 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE, - XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC, XE_GT_SRIOV_STATE_PAUSE_FAILED, XE_GT_SRIOV_STATE_PAUSED, -- 2.50.1 In upcoming changes, the GuC VF migration data will be handled as part of separate SAVE/RESTORE states in VF control state machine. Now that the data is decoupled from both guc_state debugfs and PAUSE state, we can safely remove the struct xe_gt_sriov_state_snapshot and modify the GuC save/restore functions to operate on struct xe_sriov_migration_data. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 265 +++++------------- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 13 +- .../drm/xe/xe_gt_sriov_pf_migration_types.h | 27 -- drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 4 - 4 files changed, 79 insertions(+), 230 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index a2db127982638..4a716e0a29fe4 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -28,6 +28,17 @@ static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt, return &gt->sriov.pf.vfs[vfid].migration; } +static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + print_hex_dump_bytes("mig_hdr: ", DUMP_PREFIX_OFFSET, + &data->hdr, sizeof(data->hdr)); + print_hex_dump_bytes("mig_data: ", DUMP_PREFIX_OFFSET, + data->vaddr, min(SZ_64, data->size)); + } +} + /* Return: number of dwords saved/restored/required or a negative error code on failure */ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, u64 addr, u32 ndwords) @@ -47,7 +58,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, } /* Return: size of the state in dwords or a negative error code on failure */ -static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) +static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid) { int ret; @@ -56,8 +67,8 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) } /* Return: number of state dwords saved or a negative error code on failure */ -static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, - void *dst, size_t size) +static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid, + void *dst, size_t size) { const int ndwords = size / sizeof(u32); struct xe_guc *guc = &gt->uc.guc; @@ -85,8 +96,8 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, } /* Return: number of state dwords restored or a negative error code on failure */ -static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, - const void *src, size_t size) +static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid, + const void *src, size_t size) { const int ndwords = size / sizeof(u32); struct xe_guc *guc = &gt->uc.guc; @@ -114,120 +125,67 @@ static bool pf_migration_supported(struct xe_gt *gt) return xe_sriov_pf_migration_supported(gt_to_xe(gt)); } -static struct mutex 
*pf_migration_mutex(struct xe_gt *gt) +static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid) { - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - return &gt->sriov.pf.migration.snapshot_lock; -} - -static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt, - unsigned int vfid) -{ - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - lockdep_assert_held(pf_migration_mutex(gt)); - - return &gt->sriov.pf.vfs[vfid].snapshot; -} - -static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) -{ - return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs; -} - -static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) -{ - struct xe_device *xe = gt_to_xe(gt); - - drmm_kfree(&xe->drm, snapshot->guc.buff); - snapshot->guc.buff = NULL; - snapshot->guc.size = 0; -} - -static int pf_alloc_guc_state(struct xe_gt *gt, - struct xe_gt_sriov_state_snapshot *snapshot, - size_t size) -{ - struct xe_device *xe = gt_to_xe(gt); - void *p; - - pf_free_guc_state(gt, snapshot); - - if (!size) - return -ENODATA; - - if (size % sizeof(u32)) - return -EINVAL; - - if (size > SZ_2M) - return -EFBIG; - - p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL); - if (!p) - return -ENOMEM; - - snapshot->guc.buff = p; - snapshot->guc.size = size; - return 0; -} - -static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) -{ - if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { - unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot); - - xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n", - vfid, snapshot->guc.size / sizeof(u32)); - print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET, - snapshot->guc.buff, min(SZ_64, snapshot->guc.size)); - } -} - -static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid) -{ - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + struct xe_sriov_migration_data *data; size_t size; int ret; - ret = pf_send_guc_query_vf_state_size(gt, vfid); + ret = pf_send_guc_query_vf_mig_data_size(gt, vfid); if (ret < 0) goto fail; + size = ret * sizeof(u32); - xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size); - ret = pf_alloc_guc_state(gt, snapshot, size); - if (ret < 0) + data = xe_sriov_migration_data_alloc(gt_to_xe(gt)); + if (!data) { + ret = -ENOMEM; goto fail; + } + + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_MIGRATION_DATA_TYPE_GUC, 0, size); + if (ret) + goto fail_free; - ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size); + ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size); if (ret < 0) - goto fail; + goto fail_free; size = ret * sizeof(u32); xe_gt_assert(gt, size); - xe_gt_assert(gt, size <= snapshot->guc.size); - snapshot->guc.size = size; + xe_gt_assert(gt, size <= data->size); + data->size = size; + data->remaining = size; + + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data save (%zu bytes)\n", vfid, size); + pf_dump_mig_data(gt, vfid, data); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail_free; - pf_dump_guc_state(gt, snapshot); return 0; +fail_free: + xe_sriov_migration_data_free(data); fail: - xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret)); - pf_free_guc_state(gt, snapshot); + xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n", + vfid, 
ERR_PTR(ret)); return ret; } /** - * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot. + * xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data. * @gt: the &xe_gt * @vfid: the VF identifier * * This function is for PF only. * - * Return: 0 on success or a negative error code on failure. + * Return: size in bytes or a negative error code on failure. */ -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid) +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid) { - int err; + ssize_t size; xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); @@ -236,37 +194,15 @@ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid) if (!pf_migration_supported(gt)) return -ENOPKG; - mutex_lock(pf_migration_mutex(gt)); - err = pf_save_vf_guc_state(gt, vfid); - mutex_unlock(pf_migration_mutex(gt)); - - return err; -} - -static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid) -{ - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); - int ret; - - if (!snapshot->guc.size) - return -ENODATA; - - xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n", - snapshot->guc.size / sizeof(u32), vfid); - ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size); - if (ret < 0) - goto fail; - - xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid); - return 0; + size = pf_send_guc_query_vf_mig_data_size(gt, vfid); + if (size >= 0) + size *= sizeof(u32); -fail: - xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret)); - return ret; + return size; } /** - * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state. + * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data. * @gt: the &xe_gt * @vfid: the VF identifier * @@ -274,10 +210,8 @@ static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid) * * Return: 0 on success or a negative error code on failure. */ -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid) +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid) { - int ret; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); @@ -285,75 +219,43 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf if (!pf_migration_supported(gt)) return -ENOPKG; - mutex_lock(pf_migration_mutex(gt)); - ret = pf_restore_vf_guc_state(gt, vfid); - mutex_unlock(pf_migration_mutex(gt)); - - return ret; + return pf_save_vf_guc_mig_data(gt, vfid); } -#ifdef CONFIG_DEBUG_FS -/** - * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state. - * @gt: the &xe_gt - * @vfid: the VF identifier - * @buf: the user space buffer to read to - * @count: the maximum number of bytes to read - * @pos: the current position in the buffer - * - * This function is for PF only. - * - * This function reads up to @count bytes from the saved VF GuC state buffer - * at offset @pos into the user space address starting at @buf. - * - * Return: the number of bytes read or a negative error code on failure. 
- */ -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, - char __user *buf, size_t count, loff_t *pos) +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) { - struct xe_gt_sriov_state_snapshot *snapshot; - ssize_t ret; + int ret; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - xe_gt_assert(gt, vfid != PFID); - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + xe_gt_assert(gt, data->size); - if (!pf_migration_supported(gt)) - return -ENOPKG; + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data restore (%llu bytes)\n", vfid, data->size); + pf_dump_mig_data(gt, vfid, data); - mutex_lock(pf_migration_mutex(gt)); - snapshot = pf_pick_vf_snapshot(gt, vfid); - if (snapshot->guc.size) - ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff, - snapshot->guc.size); - else - ret = -ENODATA; - mutex_unlock(pf_migration_mutex(gt)); + ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->size); + if (ret < 0) + goto fail; + + return 0; +fail: + xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n", + vfid, ERR_PTR(ret)); return ret; } /** - * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state. + * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data. * @gt: the &xe_gt * @vfid: the VF identifier - * @buf: the user space buffer with GuC VF state - * @size: the size of GuC VF state (in bytes) * * This function is for PF only. * - * This function reads @size bytes of the VF GuC state stored at user space - * address @buf and writes it into a internal VF state buffer. - * - * Return: the number of bytes used or a negative error code on failure. + * Return: 0 on success or a negative error code on failure. */ -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, - const char __user *buf, size_t size) +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) { - struct xe_gt_sriov_state_snapshot *snapshot; - loff_t pos = 0; - ssize_t ret; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); @@ -361,21 +263,8 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int if (!pf_migration_supported(gt)) return -ENOPKG; - mutex_lock(pf_migration_mutex(gt)); - snapshot = pf_pick_vf_snapshot(gt, vfid); - ret = pf_alloc_guc_state(gt, snapshot, size); - if (!ret) { - ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size); - if (ret < 0) - pf_free_guc_state(gt, snapshot); - else - pf_dump_guc_state(gt, snapshot); - } - mutex_unlock(pf_migration_mutex(gt)); - - return ret; + return pf_restore_vf_guc_state(gt, vfid, data); } -#endif /* CONFIG_DEBUG_FS */ /** * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT. 
@@ -599,10 +488,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) if (!pf_migration_supported(gt)) return 0; - err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock); - if (err) - return err; - totalvfs = xe_sriov_pf_get_totalvfs(xe); for (n = 1; n <= totalvfs; n++) { struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 4f2f2783339c3..b3c18e369df79 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -15,8 +15,10 @@ struct xe_sriov_migration_data; #define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); @@ -34,11 +36,4 @@ int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid struct xe_sriov_migration_data * xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid); -#ifdef CONFIG_DEBUG_FS -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, - char __user *buf, size_t count, loff_t *pos); -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, - const char __user *buf, size_t count); -#endif - #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 84be6fac16c8b..75d8b94cbbefb 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -6,24 +6,7 @@ #ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ -#include #include -#include - -/** - * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data. - * - * Used by the PF driver to maintain per-VF migration data. - */ -struct xe_gt_sriov_state_snapshot { - /** @guc: GuC VF state snapshot */ - struct { - /** @guc.buff: buffer with the VF state */ - u32 *buff; - /** @guc.size: size of the buffer (must be dwords aligned) */ - u32 size; - } guc; -}; /** * struct xe_gt_sriov_migration_data - GT-level per-VF migration data. @@ -35,14 +18,4 @@ struct xe_gt_sriov_migration_data { struct ptr_ring ring; }; -/** - * struct xe_gt_sriov_pf_migration - GT-level data. - * - * Used by the PF driver to maintain non-VF specific per-GT data. - */ -struct xe_gt_sriov_pf_migration { - /** @snapshot_lock: protects all VFs snapshots */ - struct mutex snapshot_lock; -}; - #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index 812e74d3f8f80..667b8310478d4 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -31,9 +31,6 @@ struct xe_gt_sriov_metadata { /** @version: negotiated VF/PF ABI version */ struct xe_gt_sriov_pf_service_version version; - /** @snapshot: snapshot of the VF state data */ - struct xe_gt_sriov_state_snapshot snapshot; - /** @migration: per-VF migration data. 
*/ struct xe_gt_sriov_migration_data migration; }; @@ -61,7 +58,6 @@ struct xe_gt_sriov_pf { struct xe_gt_sriov_pf_service service; struct xe_gt_sriov_pf_control control; struct xe_gt_sriov_pf_policy policy; - struct xe_gt_sriov_pf_migration migration; struct xe_gt_sriov_spare_config spare; struct xe_gt_sriov_metadata *vfs; }; -- 2.50.1 Connect the helpers to allow save and restore of GuC migration data in stop_copy / resume device state. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 25 ++++++- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 66 +++++++++++++++---- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 8 ++- .../drm/xe/xe_gt_sriov_pf_migration_types.h | 5 ++ 4 files changed, 87 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 203ba1fbeefcd..cb45e97f4c4d9 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -848,6 +848,18 @@ static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid) static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) { + int ret; + + if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_GUC)) { + ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_GUC); + + return -EAGAIN; + } + return 0; } @@ -887,6 +899,7 @@ static void pf_exit_vf_save_wait_data(struct xe_gt *gt, unsigned int vfid) static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) { + xe_gt_sriov_pf_migration_save_init(gt, vfid); pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA); pf_enter_vf_wip(gt, vfid); pf_queue_vf(gt, vfid); @@ -1048,14 +1061,22 @@ static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid) static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) { struct xe_sriov_migration_data *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid); + int ret = 0; xe_gt_assert(gt, data); - xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type); + switch (data->type) { + case XE_SRIOV_MIGRATION_DATA_TYPE_GUC: + ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data); + break; + default: + xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type); + break; + } xe_sriov_migration_data_free(data); - return 0; + return ret; } static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 4a716e0a29fe4..fbb451767712a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -174,22 +174,10 @@ static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid) return ret; } -/** - * xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data. - * @gt: the &xe_gt - * @vfid: the VF identifier - * - * This function is for PF only. - * - * Return: size in bytes or a negative error code on failure. 
- */ -ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid) +static ssize_t pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid) { ssize_t size; - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); - xe_gt_assert(gt, vfid != PFID); - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); if (!pf_migration_supported(gt)) return -ENOPKG; @@ -278,12 +266,19 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) { ssize_t total = 0; + ssize_t size; xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); xe_gt_assert(gt, vfid != PFID); xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); - /* Nothing to query yet - will be updated once per-GT migration data types are added */ + size = pf_migration_guc_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_pf_migration_hdr); + total += size; + return total; } @@ -330,6 +325,49 @@ void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid) xe_sriov_migration_data_free(data); } +/** + * xe_gt_sriov_pf_migration_save_init() - Initialize per-GT migration related data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + + migration->save.data_remaining = 0; + + xe_gt_assert(gt, pf_migration_guc_size(gt, vfid) > 0); + set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GUC, &migration->save.data_remaining); +} + +/** + * xe_gt_sriov_pf_migration_save_test() - Test if migration data type needs to be saved. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @type: the &xe_sriov_migration_data_type of data to be saved + * + * Return: true if the data needs saving, otherwise false. + */ +bool xe_gt_sriov_pf_migration_save_test(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_migration_data_type type) +{ + return test_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining); +} + +/** + * xe_gt_sriov_pf_migration_save_clear() - Clear migration data type from saving. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * @type: the &xe_sriov_migration_data_type of data to be cleared + */ +void xe_gt_sriov_pf_migration_save_clear(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_migration_data_type type) +{ + clear_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining); +} + /** * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring. 
* @gt: the &xe_gt diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index b3c18e369df79..66a45d6234245 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -10,12 +10,12 @@ struct xe_gt; struct xe_sriov_migration_data; +enum xe_sriov_migration_data_type; /* TODO: get this information by querying GuC in the future */ #define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); -ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); @@ -26,6 +26,12 @@ bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid); bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid); +bool xe_gt_sriov_pf_migration_save_test(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_migration_data_type type); +void xe_gt_sriov_pf_migration_save_clear(struct xe_gt *gt, unsigned int vfid, + enum xe_sriov_migration_data_type type); + int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); struct xe_sriov_migration_data * diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 75d8b94cbbefb..9f24878690d9c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -16,6 +16,11 @@ struct xe_gt_sriov_migration_data { /** @ring: queue containing VF save / restore migration data */ struct ptr_ring ring; + /** @save: structure for currently processed save migration data */ + struct { + /** @save.data_remaining: bitmap of migration types that need to be saved */ + unsigned long data_remaining; + } save; }; #endif -- 2.50.1 In an upcoming change, the VF GGTT migration data will be handled as part of VF control state machine. Add the necessary helpers to allow the migration data transfer to/from the HW GGTT resource. 
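As a rough usage sketch (illustrative only; the series itself streams this data through xe_sriov_migration_data packets rather than a caller-owned buffer), the new config helpers follow a query-then-fill convention:

    static int pf_ggtt_blob_roundtrip_sketch(struct xe_gt *gt, unsigned int vfid)
    {
            /* passing NULL/0 only queries the size of the VF's GGTT PTE blob */
            ssize_t size = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
            void *buf;
            int err;

            if (size <= 0)
                    return size;    /* nothing provisioned, or an error code */

            buf = kvzalloc(size, GFP_KERNEL);       /* illustrative buffer only */
            if (!buf)
                    return -ENOMEM;

            /* snapshot the VF's GGTT PTEs ... */
            err = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, size);
            /* ... and, on the target device, write them back */
            if (!err)
                    err = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, buf, size);

            kvfree(buf);
            return err;
    }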
Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_ggtt.c | 104 +++++++++++++++++++++ drivers/gpu/drm/xe/xe_ggtt.h | 4 + drivers/gpu/drm/xe/xe_ggtt_types.h | 2 + drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 52 +++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 5 + 5 files changed, 167 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 20d226d90c50f..00ddb4f013466 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -151,6 +151,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte) ggtt_update_access_counter(ggtt); } +static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr) +{ + xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK)); + xe_tile_assert(ggtt->tile, addr < ggtt->size); + + return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]); +} + static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) { u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB]; @@ -233,16 +241,19 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt) static const struct xe_ggtt_pt_ops xelp_pt_ops = { .pte_encode_flags = xelp_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte, + .ggtt_get_pte = xe_ggtt_get_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_ops = { .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte, + .ggtt_get_pte = xe_ggtt_get_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = { .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte_and_flush, + .ggtt_get_pte = xe_ggtt_get_pte, }; static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved) @@ -889,6 +900,20 @@ u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare) return max_hole; } +/** + * xe_ggtt_node_pt_size() - Convert GGTT node size to its page table entries size. + * @node: the &xe_ggtt_node + * + * Return: GGTT node page table entries size in bytes. + */ +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node) +{ + if (!node) + return 0; + + return node->base.size / XE_PAGE_SIZE * sizeof(u64); +} + #ifdef CONFIG_PCI_IOV static u64 xe_encode_vfid_pte(u16 vfid) { @@ -930,6 +955,85 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid) xe_ggtt_assign_locked(node->ggtt, &node->base, vfid); mutex_unlock(&node->ggtt->lock); } + +/** + * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer. + * @node: the &xe_ggtt_node to be saved + * @dst: destination buffer + * @size: destination buffer size in bytes + * @vfid: VF identifier + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid) +{ + struct xe_ggtt *ggtt; + u64 start, end; + u64 *buf = dst; + u64 pte; + + if (!node) + return -ENOENT; + + guard(mutex)(&node->ggtt->lock); + + ggtt = node->ggtt; + start = node->base.start; + end = start + size - 1; + + if (xe_ggtt_node_pt_size(node) != size) + return -EINVAL; + + while (start < end) { + pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start); + if (vfid != u64_get_bits(pte, GGTT_PTE_VFID)) + return -EPERM; + + *buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID); + start += XE_PAGE_SIZE; + } + + return 0; +} + +/** + * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer. + * @node: the &xe_ggtt_node to be loaded + * @src: source buffer + * @size: source buffer size in bytes + * @vfid: VF identifier + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid) +{ + u64 vfid_pte = xe_encode_vfid_pte(vfid); + const u64 *buf = src; + struct xe_ggtt *ggtt; + u64 start, end; + + if (!node) + return -ENOENT; + + guard(mutex)(&node->ggtt->lock); + + ggtt = node->ggtt; + start = node->base.start; + end = start + size - 1; + + if (xe_ggtt_node_pt_size(node) != size) + return -EINVAL; + + while (start < end) { + vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID); + ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte); + start += XE_PAGE_SIZE; + } + xe_ggtt_invalidate(ggtt); + + return 0; +} + #endif /** diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h index 75fc7a1efea76..1edf27608d39a 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.h +++ b/drivers/gpu/drm/xe/xe_ggtt.h @@ -41,8 +41,12 @@ u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare); int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p); u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p); +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node); + #ifdef CONFIG_PCI_IOV void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid); +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid); +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid); #endif #ifndef CONFIG_LOCKDEP diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h index c5e999d58ff2a..dacd796f81844 100644 --- a/drivers/gpu/drm/xe/xe_ggtt_types.h +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h @@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops { u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index); /** @ggtt_set_pte: Directly write into GGTT's PTE */ void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte); + /** @ggtt_get_pte: Directly read from GGTT's PTE */ + u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr); }; #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index c0c0215c07036..55444883f2ac3 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -726,6 +726,58 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair); } +/** + * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer. + * @gt: the &xe_gt + * @vfid: VF identifier (can't be 0) + * @buf: the GGTT data destination buffer (or NULL to query the buf size) + * @size: the size of the buffer (or 0 to query the buf size) + * + * This function can only be called on PF. + * + * Return: size of the buffer needed to save GGTT data if querying, + * 0 on successful save or a negative error code on failure. + */ +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid, + void *buf, size_t size) +{ + struct xe_ggtt_node *node; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + xe_gt_assert(gt, !(!buf ^ !size)); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + node = pf_pick_vf_config(gt, vfid)->ggtt_region; + + if (!buf) + return xe_ggtt_node_pt_size(node); + + return xe_ggtt_node_save(node, buf, size, vfid); +} + +/** + * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer. 
+ * @gt: the &xe_gt + * @vfid: VF identifier (can't be 0) + * @buf: the GGTT data source buffer + * @size: the size of the buffer + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + return xe_ggtt_node_load(pf_pick_vf_config(gt, vfid)->ggtt_region, buf, size, vfid); +} + static u32 pf_get_min_spare_ctxs(struct xe_gt *gt) { /* XXX: preliminary */ diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index 513e6512a575b..0293ba98eb6df 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -61,6 +61,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, const void *buf, size_t size); +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid, + void *buf, size_t size); +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size); + bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_init(struct xe_gt *gt); -- 2.50.1 Connect the helpers to allow save and restore of GGTT migration data in stop_copy / resume device state. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 13 ++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 113 ++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 3 + 3 files changed, 129 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index cb45e97f4c4d9..e7ea9b88fd246 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -860,6 +860,16 @@ static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) return -EAGAIN; } + if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_GGTT)) { + ret = xe_gt_sriov_pf_migration_ggtt_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_GGTT); + + return -EAGAIN; + } + return 0; } @@ -1066,6 +1076,9 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) xe_gt_assert(gt, data); switch (data->type) { + case XE_SRIOV_MIGRATION_DATA_TYPE_GGTT: + ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data); + break; case XE_SRIOV_MIGRATION_DATA_TYPE_GUC: ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data); break; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index fbb451767712a..6f2ee5820bdd4 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -7,6 +7,9 @@ #include "abi/guc_actions_sriov_abi.h" #include "xe_bo.h" +#include "xe_ggtt.h" +#include "xe_gt.h" +#include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_migration.h" @@ -39,6 +42,106 @@ static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid, } } +static ssize_t pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid) +{ + if (!xe_gt_is_main_type(gt)) + return 0; + + return 
xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0); +} + +static int pf_save_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_sriov_migration_data *data; + size_t size; + int ret; + + size = pf_migration_ggtt_size(gt, vfid); + xe_gt_assert(gt, size); + + data = xe_sriov_migration_data_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; + + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_MIGRATION_DATA_TYPE_GGTT, 0, size); + if (ret) + goto fail; + + ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data->vaddr, size); + if (ret) + goto fail; + + xe_gt_sriov_dbg_verbose(gt, "VF%u GGTT data save (%zu bytes)\n", vfid, size); + pf_dump_mig_data(gt, vfid, data); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail; + + return 0; + +fail: + xe_sriov_migration_data_free(data); + xe_gt_sriov_err(gt, "Failed to save VF%u GGTT data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +static int pf_restore_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + int ret; + + xe_gt_sriov_dbg_verbose(gt, "VF%u GGTT data restore (%llu bytes)\n", vfid, data->size); + pf_dump_mig_data(gt, vfid, data); + + ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, data->vaddr, data->size); + if (ret) { + xe_gt_sriov_err(gt, "Failed to restore VF%u GGTT data (%pe)\n", + vfid, ERR_PTR(ret)); + return ret; + } + + return 0; +} + +/** + * xe_gt_sriov_pf_migration_ggtt_save() - Save VF GGTT migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_save_vf_ggtt_mig_data(gt, vfid); +} + +/** + * xe_gt_sriov_pf_migration_ggtt_restore() - Restore VF GGTT migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_restore_vf_ggtt_mig_data(gt, vfid, data); +} + /* Return: number of dwords saved/restored/required or a negative error code on failure */ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, u64 addr, u32 ndwords) @@ -279,6 +382,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) size += sizeof(struct xe_sriov_pf_migration_hdr); total += size; + size = pf_migration_ggtt_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_pf_migration_hdr); + total += size; + return total; } @@ -340,6 +450,9 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) xe_gt_assert(gt, pf_migration_guc_size(gt, vfid) > 0); set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GUC, &migration->save.data_remaining); + + if (pf_migration_ggtt_size(gt, vfid) > 0) + set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GGTT, &migration->save.data_remaining); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index 66a45d6234245..bc201d8f3147a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -19,6 +19,9 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); -- 2.50.1 Implement the helpers and use them for save and restore of MMIO migration data in stop_copy / resume device state. 
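The control state machine drives all of these helpers (GuC, GGTT, and now MMIO) through the same per-type step; a simplified excerpt of the pattern used by pf_handle_vf_save_data() in this series:

    /* save at most one pending data type per pass, then return -EAGAIN so
     * the state machine requeues the VF until no pending types remain
     */
    if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO)) {
            ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid);
            if (ret)
                    return ret;

            xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO);
            return -EAGAIN;         /* other types (GuC, GGTT, ...) may still be pending */
    }

    return 0;                       /* nothing left to save on this GT */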
Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf.h | 2 + drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 13 ++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 158 ++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 3 + 4 files changed, 176 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index e7fde3f9937af..c0dcbb1054e4d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -6,6 +6,8 @@ #ifndef _XE_GT_SRIOV_PF_H_ #define _XE_GT_SRIOV_PF_H_ +#include + struct xe_gt; #ifdef CONFIG_PCI_IOV diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index e7ea9b88fd246..7cd7cae950bc7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -870,6 +870,16 @@ static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) return -EAGAIN; } + if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO)) { + ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid); + if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO); + + return -EAGAIN; + } + return 0; } @@ -1079,6 +1089,9 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) case XE_SRIOV_MIGRATION_DATA_TYPE_GGTT: ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data); break; + case XE_SRIOV_MIGRATION_DATA_TYPE_MMIO: + ret = xe_gt_sriov_pf_migration_mmio_restore(gt, vfid, data); + break; case XE_SRIOV_MIGRATION_DATA_TYPE_GUC: ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data); break; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 6f2ee5820bdd4..5e90aeafeeb41 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -5,10 +5,13 @@ #include +#include "regs/xe_guc_regs.h" + #include "abi/guc_actions_sriov_abi.h" #include "xe_bo.h" #include "xe_ggtt.h" #include "xe_gt.h" +#include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" @@ -16,6 +19,7 @@ #include "xe_gt_sriov_printk.h" #include "xe_guc_buf.h" #include "xe_guc_ct.h" +#include "xe_mmio.h" #include "xe_sriov.h" #include "xe_sriov_migration_data.h" #include "xe_sriov_pf_migration.h" @@ -357,6 +361,150 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, return pf_restore_vf_guc_state(gt, vfid, data); } +static ssize_t pf_migration_mmio_size(struct xe_gt *gt, unsigned int vfid) +{ + if (xe_gt_is_media_type(gt)) + return MED_VF_SW_FLAG_COUNT * sizeof(u32); + else + return VF_SW_FLAG_COUNT * sizeof(u32); +} + +static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size) +{ + struct xe_mmio mmio; + u32 *regs = buf; + int n; + + xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid); + + if (size != pf_migration_mmio_size(gt, vfid)) + return -EINVAL; + + if (xe_gt_is_media_type(gt)) + for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++) + regs[n] = xe_mmio_read32(&gt->mmio, MED_VF_SW_FLAG(n)); + else + for (n = 0; n < VF_SW_FLAG_COUNT; n++) + regs[n] = xe_mmio_read32(&gt->mmio, VF_SW_FLAG(n)); + + return 0; +} + +static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) +{ + const u32 *regs = buf; + struct xe_mmio mmio; + int n; + + xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid); + + if (size 
!= pf_migration_mmio_size(gt, vfid)) + return -EINVAL; + + if (xe_gt_is_media_type(gt)) + for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(&gt->mmio, MED_VF_SW_FLAG(n), regs[n]); + else + for (n = 0; n < VF_SW_FLAG_COUNT; n++) + xe_mmio_write32(&gt->mmio, VF_SW_FLAG(n), regs[n]); + + return 0; +} + +static int pf_save_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_sriov_migration_data *data; + size_t size; + int ret; + + size = pf_migration_mmio_size(gt, vfid); + xe_gt_assert(gt, size); + + data = xe_sriov_migration_data_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; + + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, 0, size); + if (ret) + goto fail; + + ret = pf_migration_mmio_save(gt, vfid, data->vaddr, size); + if (ret) + goto fail; + + xe_gt_sriov_dbg_verbose(gt, "VF%u MMIO data save (%zu bytes)\n", vfid, size); + pf_dump_mig_data(gt, vfid, data); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail; + + return 0; + +fail: + xe_sriov_migration_data_free(data); + xe_gt_sriov_err(gt, "Failed to save VF%u MMIO data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +static int pf_restore_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + int ret; + + xe_gt_sriov_dbg_verbose(gt, "VF%u MMIO data restore (%llu bytes)\n", vfid, data->size); + pf_dump_mig_data(gt, vfid, data); + + ret = pf_migration_mmio_restore(gt, vfid, data->vaddr, data->size); + if (ret) { + xe_gt_sriov_err(gt, "Failed to restore VF%u MMIO data (%pe)\n", + vfid, ERR_PTR(ret)); + + return ret; + } + + return 0; +} + +/** + * xe_gt_sriov_pf_migration_mmio_save() - Save VF MMIO migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_save_vf_mmio_mig_data(gt, vfid); +} + +/** + * xe_gt_sriov_pf_migration_mmio_restore() - Restore VF MMIO migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_restore_vf_mmio_mig_data(gt, vfid, data); +} + /** * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT. 
* @gt: the &xe_gt @@ -389,6 +537,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) size += sizeof(struct xe_sriov_pf_migration_hdr); total += size; + size = pf_migration_mmio_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_pf_migration_hdr); + total += size; + return total; } @@ -453,6 +608,9 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) if (pf_migration_ggtt_size(gt, vfid) > 0) set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GGTT, &migration->save.data_remaining); + + xe_gt_assert(gt, pf_migration_mmio_size(gt, vfid) > 0); + set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, &migration->save.data_remaining); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index bc201d8f3147a..b0eec94fea3a6 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -22,6 +22,9 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid, int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); -- 2.50.1 From: Lukasz Laguna Instead of accessing VF's lmem_obj directly, introduce a helper function to make the access more convenient. Signed-off-by: Lukasz Laguna Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 26 ++++++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 1 + 2 files changed, 27 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 55444883f2ac3..0815f761969f0 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1651,6 +1651,32 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, "LMEM", n, err); } +static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); + + return config->lmem_obj; +} + +/** + * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function can only be called on PF. + * The caller is responsible for calling xe_bo_put() on the returned object. + * + * Return: pointer to struct &xe_bo backing VF LMEM (if any). 
+ */ +struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, vfid); + + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt)); + + return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid)); +} + static u64 pf_query_free_lmem(struct xe_gt *gt) { struct xe_tile *tile = gt->tile; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index 0293ba98eb6df..d9fbc6c30b158 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -36,6 +36,7 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs); int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs, u64 size); +struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid); u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum); -- 2.50.1 From: Lukasz Laguna Introduce a new function to copy data between VRAM and sysmem objects. The existing xe_migrate_copy() is tailored for eviction and restore operations, which involves additional logic and operates on entire objects. The xe_migrate_vram_copy_chunk() allows copying chunks of data to or from a dedicated buffer object, which is essential in case of VF migration. Signed-off-by: Lukasz Laguna Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_migrate.c | 128 ++++++++++++++++++++++++++++++-- drivers/gpu/drm/xe/xe_migrate.h | 8 ++ 2 files changed, 131 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 56a5804726e96..dbe9320863ab0 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -29,6 +29,7 @@ #include "xe_lrc.h" #include "xe_map.h" #include "xe_mocs.h" +#include "xe_printk.h" #include "xe_pt.h" #include "xe_res_cursor.h" #include "xe_sa.h" @@ -1210,6 +1211,128 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate) return migrate->q; } +/** + * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object. + * @vram_bo: The VRAM buffer object. + * @vram_offset: The VRAM offset. + * @sysmem_bo: The sysmem buffer object. + * @sysmem_offset: The sysmem offset. + * @size: The size of VRAM chunk to copy. + * @dir: The direction of the copy operation. + * + * Copies a portion of a buffer object between VRAM and system memory. + * On Xe2 platforms that support flat CCS, VRAM data is decompressed when + * copying to system memory. + * + * Return: Pointer to a dma_fence representing the last copy batch, or + * an error pointer on failure. If there is a failure, any copy operation + * started by the function call has been synced. 
+ */ +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset, + struct xe_bo *sysmem_bo, u64 sysmem_offset, + u64 size, enum xe_migrate_copy_dir dir) +{ + struct xe_device *xe = xe_bo_device(vram_bo); + struct xe_tile *tile = vram_bo->tile; + struct xe_gt *gt = tile->primary_gt; + struct xe_migrate *m = tile->migrate; + struct dma_fence *fence = NULL; + struct ttm_resource *vram = vram_bo->ttm.resource; + struct ttm_resource *sysmem = sysmem_bo->ttm.resource; + struct xe_res_cursor vram_it, sysmem_it; + u64 vram_L0_ofs, sysmem_L0_ofs; + u32 vram_L0_pt, sysmem_L0_pt; + u64 vram_L0, sysmem_L0; + bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM); + bool use_comp_pat = to_sysmem && + GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe); + int pass = 0; + int err; + + xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE)); + xe_assert(xe, xe_bo_is_vram(vram_bo)); + xe_assert(xe, !xe_bo_is_vram(sysmem_bo)); + xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size)); + xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size)); + + xe_res_first(vram, vram_offset, size, &vram_it); + xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it); + + while (size) { + u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM; + u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ + struct xe_sched_job *job; + struct xe_bb *bb; + u32 update_idx; + bool usm = xe->info.has_usm; + u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; + + sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it); + vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0); + + xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0); + + pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0; + batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0, + &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts); + + batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs, + &sysmem_L0_pt, 0, avail_pts, avail_pts); + batch_size += EMIT_COPY_DW; + + bb = xe_bb_new(gt, batch_size, usm); + if (IS_ERR(bb)) { + err = PTR_ERR(bb); + return ERR_PTR(err); + } + + if (xe_migrate_allow_identity(vram_L0, &vram_it)) + xe_res_next(&vram_it, vram_L0); + else + emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram); + + emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem); + + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; + update_idx = bb->len; + + if (to_sysmem) + emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE); + else + emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE); + + job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm), + update_idx); + if (IS_ERR(job)) { + xe_bb_free(bb, NULL); + err = PTR_ERR(job); + return ERR_PTR(err); + } + + xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); + + xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv, + DMA_RESV_USAGE_BOOKKEEP)); + xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv, + DMA_RESV_USAGE_BOOKKEEP)); + + scoped_guard(mutex, &m->job_mutex) { + xe_sched_job_arm(job); + dma_fence_put(fence); + fence = dma_fence_get(&job->drm.s_fence->finished); + xe_sched_job_push(job); + + dma_fence_put(m->fence); + m->fence = dma_fence_get(fence); + } + + xe_bb_free(bb, fence); + size -= vram_L0; + } + + return fence; +} + static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, u32 size, u32 pitch) { @@ -1912,11 +2035,6 @@ static 
bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr, return true; } -enum xe_migrate_copy_dir { - XE_MIGRATE_COPY_TO_VRAM, - XE_MIGRATE_COPY_TO_SRAM, -}; - #define XE_CACHELINE_BYTES 64ull #define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1) diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 4fad324b62535..d7bcc6ad8464e 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -28,6 +28,11 @@ struct xe_vma; enum xe_sriov_vf_ccs_rw_ctxs; +enum xe_migrate_copy_dir { + XE_MIGRATE_COPY_TO_VRAM, + XE_MIGRATE_COPY_TO_SRAM, +}; + /** * struct xe_migrate_pt_update_ops - Callbacks for the * xe_migrate_update_pgtables() function. @@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate); struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate); +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset, + struct xe_bo *sysmem_bo, u64 sysmem_offset, + u64 size, enum xe_migrate_copy_dir dir); int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, unsigned long offset, void *buf, int len, int write); -- 2.50.1 Connect the helpers to allow save and restore of VRAM migration data in stop_copy / resume device state. Co-developed-by: Lukasz Laguna Signed-off-by: Lukasz Laguna Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 15 ++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 213 ++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 5 + .../drm/xe/xe_gt_sriov_pf_migration_types.h | 2 + drivers/gpu/drm/xe/xe_sriov_pf_control.c | 3 + 5 files changed, 238 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 7cd7cae950bc7..de0f63610e780 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -880,6 +880,18 @@ static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid) return -EAGAIN; } + if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_VRAM)) { + ret = xe_gt_sriov_pf_migration_vram_save(gt, vfid); + if (ret == -EAGAIN) + return -EAGAIN; + else if (ret) + return ret; + + xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_VRAM); + + return -EAGAIN; + } + return 0; } @@ -1095,6 +1107,9 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid) case XE_SRIOV_MIGRATION_DATA_TYPE_GUC: ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data); break; + case XE_SRIOV_MIGRATION_DATA_TYPE_VRAM: + ret = xe_gt_sriov_pf_migration_vram_restore(gt, vfid, data); + break; default: xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type); break; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index 5e90aeafeeb41..7ccd43545fdac 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -19,6 +19,7 @@ #include "xe_gt_sriov_printk.h" #include "xe_guc_buf.h" #include "xe_guc_ct.h" +#include "xe_migrate.h" #include "xe_mmio.h" #include "xe_sriov.h" #include "xe_sriov_migration_data.h" @@ -505,6 +506,207 @@ int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, return pf_restore_vf_mmio_mig_data(gt, vfid, data); } +static ssize_t pf_migration_vram_size(struct xe_gt *gt, unsigned int vfid) +{ + if 
(!xe_gt_is_main_type(gt)) + return 0; + + return xe_gt_sriov_pf_config_get_lmem(gt, vfid); +} + +static struct dma_fence *__pf_save_restore_vram(struct xe_gt *gt, unsigned int vfid, + struct xe_bo *vram, u64 vram_offset, + struct xe_bo *sysmem, u64 sysmem_offset, + size_t size, bool save) +{ + struct dma_fence *ret = NULL; + struct drm_exec exec; + int err; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + err = drm_exec_lock_obj(&exec, &vram->ttm.base); + drm_exec_retry_on_contention(&exec); + if (err) { + ret = ERR_PTR(err); + goto err; + } + + err = drm_exec_lock_obj(&exec, &sysmem->ttm.base); + drm_exec_retry_on_contention(&exec); + if (err) { + ret = ERR_PTR(err); + goto err; + } + } + + ret = xe_migrate_vram_copy_chunk(vram, vram_offset, sysmem, sysmem_offset, size, + save ? XE_MIGRATE_COPY_TO_SRAM : XE_MIGRATE_COPY_TO_VRAM); + +err: + drm_exec_fini(&exec); + + return ret; +} + +#define PF_VRAM_SAVE_RESTORE_TIMEOUT (5 * HZ) +static int pf_save_vram_chunk(struct xe_gt *gt, unsigned int vfid, + struct xe_bo *src_vram, u64 src_vram_offset, + size_t size) +{ + struct xe_sriov_migration_data *data; + struct dma_fence *fence; + int ret; + + data = xe_sriov_migration_data_alloc(gt_to_xe(gt)); + if (!data) + return -ENOMEM; + + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id, + XE_SRIOV_MIGRATION_DATA_TYPE_VRAM, + src_vram_offset, size); + if (ret) + goto fail; + + fence = __pf_save_restore_vram(gt, vfid, + src_vram, src_vram_offset, + data->bo, 0, size, true); + + ret = dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT); + dma_fence_put(fence); + if (!ret) { + ret = -ETIME; + goto fail; + } + + xe_gt_sriov_dbg_verbose(gt, "VF%u VRAM data save (%zu bytes)\n", vfid, size); + pf_dump_mig_data(gt, vfid, data); + + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data); + if (ret) + goto fail; + + return 0; + +fail: + xe_sriov_migration_data_free(data); + return ret; +} + +#define VF_VRAM_STATE_CHUNK_MAX_SIZE SZ_512M +static int pf_save_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); + loff_t *offset = &migration->save.vram_offset; + struct xe_bo *vram; + size_t vram_size, chunk_size; + int ret; + + vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid); + if (!vram) + return -ENXIO; + + vram_size = xe_bo_size(vram); + + xe_gt_assert(gt, *offset < vram_size); + + chunk_size = min(vram_size - *offset, VF_VRAM_STATE_CHUNK_MAX_SIZE); + + ret = pf_save_vram_chunk(gt, vfid, vram, *offset, chunk_size); + if (ret) + goto fail; + + *offset += chunk_size; + + xe_bo_put(vram); + + if (*offset < vram_size) + return -EAGAIN; + + return 0; + +fail: + xe_bo_put(vram); + xe_gt_sriov_err(gt, "Failed to save VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +static int pf_restore_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + u64 end = data->hdr.offset + data->hdr.size; + struct dma_fence *fence; + struct xe_bo *vram; + size_t size; + int ret = 0; + + vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid); + if (!vram) + return -ENXIO; + + size = xe_bo_size(vram); + + if (end > size || end < data->hdr.size) { + ret = -EINVAL; + goto err; + } + + xe_gt_sriov_dbg_verbose(gt, "VF%u VRAM data restore (%llu bytes)\n", vfid, data->size); + pf_dump_mig_data(gt, vfid, data); + + fence = __pf_save_restore_vram(gt, vfid, vram, data->hdr.offset, + data->bo, 0, data->hdr.size, false); + ret 
= dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT); + dma_fence_put(fence); + if (!ret) { + ret = -ETIME; + goto err; + } + + return 0; +err: + xe_bo_put(vram); + xe_gt_sriov_err(gt, "Failed to restore VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +/** + * xe_gt_sriov_pf_migration_vram_save() - Save VF VRAM migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_save_vf_vram_mig_data(gt, vfid); +} + +/** + * xe_gt_sriov_pf_migration_vram_restore() - Restore VF VRAM migration data. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be 0) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + return pf_restore_vf_vram_mig_data(gt, vfid, data); +} + /** * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT. * @gt: the &xe_gt @@ -544,6 +746,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid) size += sizeof(struct xe_sriov_pf_migration_hdr); total += size; + size = pf_migration_vram_size(gt, vfid); + if (size < 0) + return size; + if (size > 0) + size += sizeof(struct xe_sriov_pf_migration_hdr); + total += size; + return total; } @@ -602,6 +811,7 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid); migration->save.data_remaining = 0; + migration->save.vram_offset = 0; xe_gt_assert(gt, pf_migration_guc_size(gt, vfid) > 0); set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GUC, &migration->save.data_remaining); @@ -611,6 +821,9 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid) xe_gt_assert(gt, pf_migration_mmio_size(gt, vfid) > 0); set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, &migration->save.data_remaining); + + if (pf_migration_vram_size(gt, vfid) > 0) + set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_VRAM, &migration->save.data_remaining); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h index b0eec94fea3a6..85b43b2cceb73 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -25,6 +25,11 @@ int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid, int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid, struct xe_sriov_migration_data *data); +int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid, + struct xe_sriov_migration_data *data); + +void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid); ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid); diff --git 
a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h index 9f24878690d9c..f50c64241e9c0 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -20,6 +20,8 @@ struct xe_gt_sriov_migration_data { struct { /** @save.data_remaining: bitmap of migration types that need to be saved */ unsigned long data_remaining; + /** @save.vram_offset: last saved offset within VRAM, used for chunked VRAM save */ + loff_t vram_offset; } save; }; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c index c2768848daba1..aac8ecb861545 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -5,6 +5,7 @@ #include "xe_device.h" #include "xe_gt_sriov_pf_control.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_sriov_migration_data.h" #include "xe_sriov_pf_control.h" #include "xe_sriov_printk.h" @@ -171,6 +172,8 @@ int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid) return ret; for_each_gt(gt, xe, id) { + xe_gt_sriov_pf_migration_save_init(gt, vfid); + ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid); if (ret) return ret; -- 2.50.1 VF FLR requires additional processing done by PF driver. The processing is done after FLR is already finished from PCIe perspective. In order to avoid a scenario where migration state transitions while PF processing is still in progress, additional synchronization point is needed. Add a helper that will be used as part of VF driver struct pci_error_handlers .reset_done() callback. Signed-off-by: Michał Winiarski Reviewed-by: Michal Wajdeczko --- drivers/gpu/drm/xe/xe_sriov_pf_control.c | 24 ++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_sriov_pf_control.h | 1 + 2 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c index aac8ecb861545..bed488476706d 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -123,6 +123,30 @@ int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid) return result; } +/** + * xe_sriov_pf_control_wait_flr() - Wait for a VF reset (FLR) to complete. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_wait_flr(gt, vfid); + result = result ? -EUCLEAN : err; + } + + return result; +} + /** * xe_sriov_pf_control_sync_flr() - Synchronize a VF FLR between all GTs. 
* @xe: the &xe_device diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h index 30318c1fba34e..ef9f219b21096 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h @@ -12,6 +12,7 @@ int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid); -- 2.50.1 All of the necessary building blocks are now in place to support SR-IOV VF migration. Enable the feature without the need to pass feature enabling debug flags for those platforms and rely on .has_memirq presence instead (like with VF resource fixup). Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index 6992c227e5a44..c4e9b0ff5b3ae 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -50,8 +50,10 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe) static bool pf_check_migration_support(struct xe_device *xe) { - /* XXX: for now this is for feature enabling only */ - return IS_ENABLED(CONFIG_DRM_XE_DEBUG); + if (xe_device_has_memirq(xe)) + return true; + + return false; } static void pf_migration_cleanup(void *arg) -- 2.50.1 In certain scenarios (such as VF migration), VF driver needs to interact with PF driver. Add a helper to allow VF driver access to PF xe_device. Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_pci.c | 17 +++++++++++++++++ drivers/gpu/drm/xe/xe_pci.h | 3 +++ 2 files changed, 20 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index aeae675c912b7..3e7b03c847a42 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -1224,6 +1224,23 @@ static struct pci_driver xe_pci_driver = { #endif }; +/** + * xe_pci_get_pf_xe_device() - Get PF &xe_device. + * @pdev: the VF &pci_dev device + * + * Return: pointer to PF &xe_device, NULL otherwise. + */ +struct xe_device *xe_pci_get_pf_xe_device(struct pci_dev *pdev) +{ + struct drm_device *drm; + + drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver); + if (IS_ERR(drm)) + return NULL; + + return to_xe_device(drm); +} + int xe_register_pci_driver(void) { return pci_register_driver(&xe_pci_driver); diff --git a/drivers/gpu/drm/xe/xe_pci.h b/drivers/gpu/drm/xe/xe_pci.h index 611c1209b14cc..2bb2e486756db 100644 --- a/drivers/gpu/drm/xe/xe_pci.h +++ b/drivers/gpu/drm/xe/xe_pci.h @@ -6,6 +6,9 @@ #ifndef _XE_PCI_H_ #define _XE_PCI_H_ +struct pci_dev; + +struct xe_device *xe_pci_get_pf_xe_device(struct pci_dev *pdev); int xe_register_pci_driver(void); void xe_unregister_pci_driver(void); -- 2.50.1 Vendor-specific VFIO driver for Xe will implement VF migration. Export everything that's needed for migration ops. 
Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/Makefile | 2 + drivers/gpu/drm/xe/xe_sriov_vfio.c | 248 +++++++++++++++++++++++++++++ include/drm/intel/xe_sriov_vfio.h | 30 ++++ 3 files changed, 280 insertions(+) create mode 100644 drivers/gpu/drm/xe/xe_sriov_vfio.c create mode 100644 include/drm/intel/xe_sriov_vfio.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 3d72db9e528e4..de3778f51ce7e 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -182,6 +182,8 @@ xe-$(CONFIG_PCI_IOV) += \ xe_sriov_pf_service.o \ xe_tile_sriov_pf_debugfs.o +xe-$(CONFIG_XE_VFIO_PCI) += xe_sriov_vfio.o + # include helpers for tests even when XE is built-in ifdef CONFIG_DRM_XE_KUNIT_TEST xe-y += tests/xe_kunit_helpers.o diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c new file mode 100644 index 0000000000000..9b02a86132e8b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include + +#include "xe_assert.h" +#include "xe_pci.h" +#include "xe_pm.h" +#include "xe_sriov_migration_data.h" +#include "xe_sriov_pf_control.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration.h" + +/** + * xe_sriov_vfio_get_xe_device() - Get PF &xe_device. + * @pdev: the VF &pci_dev device + * + * Return: pointer to PF &xe_device, NULL otherwise. + */ +struct xe_device *xe_sriov_vfio_get_xe_device(struct pci_dev *pdev) +{ + return xe_pci_get_pf_xe_device(pdev); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_xe_device, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_migration_supported() - Check if migration is supported. + * @xe: the PF &xe_device + * + * Return: true if migration is supported, false otherwise. + */ +bool xe_sriov_vfio_migration_supported(struct xe_device *xe) +{ + return xe_sriov_pf_migration_supported(xe); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * This function will wait until VF FLR is processed by PF on all tiles (or + * until timeout occurs). + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_control_wait_flr(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_wait_flr_done, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_suspend_device() - Suspend VF. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * This function will pause VF on all tiles/GTs. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + return xe_sriov_pf_control_pause_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_suspend_device, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_resume_device() - Resume VF. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * This function will resume VF on all tiles. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_control_resume_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_device, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + return xe_sriov_pf_control_trigger_save_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_enter, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_control_finish_save_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_exit, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid) +{ + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + return xe_sriov_pf_control_trigger_restore_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_data_enter, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_control_finish_restore_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_resume_data_exit, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_error() - Move VF device to error state. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Reset is needed to move it out of error state. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_control_stop_vf(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_error, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_data_read() - Read migration data from the VF device. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * @buf: start address of userspace buffer + * @len: requested read size from userspace + * + * Return: number of bytes that has been successfully read, + * 0 if no more migration data is available, -errno on failure. 
+ */ +ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + return xe_sriov_migration_data_read(xe, vfid, buf, len); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_data_write() - Write migration data to the VF device. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * @buf: start address of userspace buffer + * @len: requested write size from userspace + * + * Return: number of bytes that has been successfully written, -errno on failure. + */ +ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + return xe_sriov_migration_data_write(xe, vfid, buf, len); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci"); + +/** + * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data. + * @xe: the PF &xe_device + * @vfid: the VF identifier (can't be 0) + * + * Return: migration data size in bytes or a negative error code on failure. + */ +ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid) +{ + if (vfid == PFID || vfid > xe_sriov_pf_get_totalvfs(xe)) + return -EINVAL; + + xe_assert(xe, !xe_pm_runtime_suspended(xe)); + + return xe_sriov_pf_migration_size(xe, vfid); +} +EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_stop_copy_size, "xe-vfio-pci"); diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h new file mode 100644 index 0000000000000..5b02f66465ed9 --- /dev/null +++ b/include/drm/intel/xe_sriov_vfio.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_VFIO_H_ +#define _XE_SRIOV_VFIO_H_ + +#include + +struct pci_dev; +struct xe_device; + +struct xe_device *xe_sriov_vfio_get_xe_device(struct pci_dev *pdev); +bool xe_sriov_vfio_migration_supported(struct xe_device *xe); +int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid); +int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid); +ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid, + char __user *buf, size_t len); +ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid, + const char __user *buf, size_t len); +ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid); + +#endif -- 2.50.1 In order to allow VFIO users to choose the right driver override, VFIO driver variant used for VF migration needs to use Intel Graphics PCI IDs. Add INTEL_VGA_VFIO_DEVICE match that sets VFIO override_only. 
Signed-off-by: Michał Winiarski --- include/drm/intel/pciids.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h index b258e79b437ac..d14ce43139a28 100644 --- a/include/drm/intel/pciids.h +++ b/include/drm/intel/pciids.h @@ -43,6 +43,13 @@ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ .driver_data = (kernel_ulong_t)(_info), \ } + +#define INTEL_VGA_VFIO_DEVICE(_id, _info) { \ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \ + .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ + .driver_data = (kernel_ulong_t)(_info), \ + .override_only = PCI_ID_F_VFIO_DRIVER_OVERRIDE, \ +} #endif #define INTEL_I810_IDS(MACRO__, ...) \ -- 2.50.1 In addition to generic VFIO PCI functionality, the driver implements VFIO migration uAPI, allowing userspace to enable migration for Intel Graphics SR-IOV Virtual Functions. The driver binds to VF device, and uses API exposed by Xe driver bound to PF device to control VF device state and transfer the migration data. Signed-off-by: Michał Winiarski --- MAINTAINERS | 7 + drivers/vfio/pci/Kconfig | 2 + drivers/vfio/pci/Makefile | 2 + drivers/vfio/pci/xe/Kconfig | 12 + drivers/vfio/pci/xe/Makefile | 3 + drivers/vfio/pci/xe/main.c | 552 +++++++++++++++++++++++++++++++++++ 6 files changed, 578 insertions(+) create mode 100644 drivers/vfio/pci/xe/Kconfig create mode 100644 drivers/vfio/pci/xe/Makefile create mode 100644 drivers/vfio/pci/xe/main.c diff --git a/MAINTAINERS b/MAINTAINERS index b890ff265f03f..d73348c5f3f3e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -27008,6 +27008,13 @@ L: virtualization@lists.linux.dev S: Maintained F: drivers/vfio/pci/virtio +VFIO XE PCI DRIVER +M: Michał Winiarski +L: kvm@vger.kernel.org +L: intel-xe@lists.freedesktop.org +S: Supported +F: drivers/vfio/pci/xe + VGA_SWITCHEROO R: Lukas Wunner S: Maintained diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 2b0172f546652..c100f0ab87f2d 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -67,4 +67,6 @@ source "drivers/vfio/pci/nvgrace-gpu/Kconfig" source "drivers/vfio/pci/qat/Kconfig" +source "drivers/vfio/pci/xe/Kconfig" + endmenu diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile index cf00c0a7e55c8..f5d46aa9347b9 100644 --- a/drivers/vfio/pci/Makefile +++ b/drivers/vfio/pci/Makefile @@ -19,3 +19,5 @@ obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/ obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/ obj-$(CONFIG_QAT_VFIO_PCI) += qat/ + +obj-$(CONFIG_XE_VFIO_PCI) += xe/ diff --git a/drivers/vfio/pci/xe/Kconfig b/drivers/vfio/pci/xe/Kconfig new file mode 100644 index 0000000000000..787be88268685 --- /dev/null +++ b/drivers/vfio/pci/xe/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config XE_VFIO_PCI + tristate "VFIO support for Intel Graphics" + depends on DRM_XE + select VFIO_PCI_CORE + help + This option enables vendor-specific VFIO driver for Intel Graphics. + In addition to generic VFIO PCI functionality, it implements VFIO + migration uAPI allowing userspace to enable migration for + Intel Graphics SR-IOV Virtual Functions supported by the Xe driver. + + If you don't know what to do here, say N. 
diff --git a/drivers/vfio/pci/xe/Makefile b/drivers/vfio/pci/xe/Makefile new file mode 100644 index 0000000000000..13aa0fd192cd4 --- /dev/null +++ b/drivers/vfio/pci/xe/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_XE_VFIO_PCI) += xe-vfio-pci.o +xe-vfio-pci-y := main.o diff --git a/drivers/vfio/pci/xe/main.c b/drivers/vfio/pci/xe/main.c new file mode 100644 index 0000000000000..4892c6648c0a7 --- /dev/null +++ b/drivers/vfio/pci/xe/main.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2025 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct xe_vfio_pci_migration_file { + struct file *filp; + /* serializes accesses to migration data */ + struct mutex lock; + struct xe_vfio_pci_core_device *xe_vdev; +}; + +struct xe_vfio_pci_core_device { + struct vfio_pci_core_device core_device; + struct xe_device *xe; + /* VF number used by PF, Xe HW/FW components use vfid indexing starting from 1 */ + unsigned int vfid; + u8 migrate_cap:1; + u8 deferred_reset:1; + /* protects migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + /* protects the reset_done flow */ + spinlock_t reset_lock; + struct xe_vfio_pci_migration_file *migf; +}; + +#define xe_vdev_to_dev(xe_vdev) (&(xe_vdev)->core_device.pdev->dev) + +static void xe_vfio_pci_disable_file(struct xe_vfio_pci_migration_file *migf) +{ + struct xe_vfio_pci_core_device *xe_vdev = migf->xe_vdev; + + mutex_lock(&migf->lock); + xe_vdev->migf = NULL; + mutex_unlock(&migf->lock); +} + +static void xe_vfio_pci_reset(struct xe_vfio_pci_core_device *xe_vdev) +{ + if (xe_vdev->migf) + xe_vfio_pci_disable_file(xe_vdev->migf); + + xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; +} + +static void xe_vfio_pci_state_mutex_lock(struct xe_vfio_pci_core_device *xe_vdev) +{ + mutex_lock(&xe_vdev->state_mutex); +} + +/* + * This function is called in all state_mutex unlock cases to + * handle a 'deferred_reset' if exists. + */ +static void xe_vfio_pci_state_mutex_unlock(struct xe_vfio_pci_core_device *xe_vdev) +{ +again: + spin_lock(&xe_vdev->reset_lock); + if (xe_vdev->deferred_reset) { + xe_vdev->deferred_reset = false; + spin_unlock(&xe_vdev->reset_lock); + xe_vfio_pci_reset(xe_vdev); + goto again; + } + mutex_unlock(&xe_vdev->state_mutex); + spin_unlock(&xe_vdev->reset_lock); +} + +static void xe_vfio_pci_reset_done(struct pci_dev *pdev) +{ + struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev); + int ret; + + if (!xe_vdev->vfid) + return; + + /* + * VF FLR requires additional processing done by PF driver. + * The processing is done after FLR is already finished from PCIe + * perspective. + * In order to avoid a scenario where VF is used while PF processing + * is still in progress, additional synchronization point is needed. + */ + ret = xe_sriov_vfio_wait_flr_done(xe_vdev->xe, xe_vdev->vfid); + if (ret) + dev_err(&pdev->dev, "Failed to wait for FLR: %d\n", ret); + + if (!xe_vdev->migrate_cap) + return; + + /* + * As the higher VFIO layers are holding locks across reset and using + * those same locks with the mm_lock we need to prevent ABBA deadlock + * with the state_mutex and mm_lock. + * In case the state_mutex was taken already we defer the cleanup work + * to the unlock flow of the other running context. 
+ */ + spin_lock(&xe_vdev->reset_lock); + xe_vdev->deferred_reset = true; + if (!mutex_trylock(&xe_vdev->state_mutex)) { + spin_unlock(&xe_vdev->reset_lock); + return; + } + spin_unlock(&xe_vdev->reset_lock); + xe_vfio_pci_state_mutex_unlock(xe_vdev); + + xe_vfio_pci_reset(xe_vdev); +} + +static const struct pci_error_handlers xe_vfio_pci_err_handlers = { + .reset_done = xe_vfio_pci_reset_done, + .error_detected = vfio_pci_core_aer_err_detected, +}; + +static int xe_vfio_pci_open_device(struct vfio_device *core_vdev) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev); + struct vfio_pci_core_device *vdev = &xe_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static int xe_vfio_pci_release_file(struct inode *inode, struct file *filp) +{ + struct xe_vfio_pci_migration_file *migf = filp->private_data; + + xe_vfio_pci_disable_file(migf); + mutex_destroy(&migf->lock); + kfree(migf); + + return 0; +} + +static ssize_t xe_vfio_pci_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos) +{ + struct xe_vfio_pci_migration_file *migf = filp->private_data; + ssize_t ret; + + if (pos) + return -ESPIPE; + + mutex_lock(&migf->lock); + ret = xe_sriov_vfio_data_read(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len); + mutex_unlock(&migf->lock); + + return ret; +} + +static const struct file_operations xe_vfio_pci_save_fops = { + .owner = THIS_MODULE, + .read = xe_vfio_pci_save_read, + .release = xe_vfio_pci_release_file, + .llseek = noop_llseek, +}; + +static ssize_t xe_vfio_pci_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct xe_vfio_pci_migration_file *migf = filp->private_data; + ssize_t ret; + + if (pos) + return -ESPIPE; + + mutex_lock(&migf->lock); + ret = xe_sriov_vfio_data_write(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len); + mutex_unlock(&migf->lock); + + return ret; +} + +static const struct file_operations xe_vfio_pci_resume_fops = { + .owner = THIS_MODULE, + .write = xe_vfio_pci_resume_write, + .release = xe_vfio_pci_release_file, + .llseek = noop_llseek, +}; + +static const char *vfio_dev_state_str(u32 state) +{ + switch (state) { + case VFIO_DEVICE_STATE_RUNNING: return "running"; + case VFIO_DEVICE_STATE_RUNNING_P2P: return "running_p2p"; + case VFIO_DEVICE_STATE_STOP_COPY: return "stopcopy"; + case VFIO_DEVICE_STATE_STOP: return "stop"; + case VFIO_DEVICE_STATE_RESUMING: return "resuming"; + case VFIO_DEVICE_STATE_ERROR: return "error"; + default: return ""; + } +} + +enum xe_vfio_pci_file_type { + XE_VFIO_FILE_SAVE = 0, + XE_VFIO_FILE_RESUME, +}; + +static struct xe_vfio_pci_migration_file * +xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev, + enum xe_vfio_pci_file_type type) +{ + struct xe_vfio_pci_migration_file *migf; + const struct file_operations *fops; + int flags; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + fops = type == XE_VFIO_FILE_SAVE ? &xe_vfio_pci_save_fops : &xe_vfio_pci_resume_fops; + flags = type == XE_VFIO_FILE_SAVE ? 
O_RDONLY : O_WRONLY; + migf->filp = anon_inode_getfile("xe_vfio_mig", fops, migf, flags); + if (IS_ERR(migf->filp)) { + kfree(migf); + return ERR_CAST(migf->filp); + } + + mutex_init(&migf->lock); + migf->xe_vdev = xe_vdev; + xe_vdev->migf = migf; + + stream_open(migf->filp->f_inode, migf->filp); + + return migf; +} + +static struct file * +xe_vfio_set_state(struct xe_vfio_pci_core_device *xe_vdev, u32 new) +{ + u32 cur = xe_vdev->mig_state; + int ret; + + dev_dbg(xe_vdev_to_dev(xe_vdev), + "state: %s->%s\n", vfio_dev_state_str(cur), vfio_dev_state_str(new)); + + /* + * "STOP" handling is reused for "RUNNING_P2P", as the device doesn't + * have the capability to selectively block outgoing p2p DMA transfers. + * While the device is allowing BAR accesses when the VF is stopped, it + * is not processing any new workload requests, effectively stopping + * any outgoing DMA transfers (not just p2p). + * Any VRAM / MMIO accesses occurring during "RUNNING_P2P" are kept and + * will be migrated to target VF during stop-copy. + */ + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) { + ret = xe_sriov_vfio_suspend_device(xe_vdev->xe, xe_vdev->vfid); + if (ret) + goto err; + + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P)) + return NULL; + + if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) { + ret = xe_sriov_vfio_resume_device(xe_vdev->xe, xe_vdev->vfid); + if (ret) + goto err; + + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct xe_vfio_pci_migration_file *migf; + + migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_SAVE); + if (IS_ERR(migf)) { + ret = PTR_ERR(migf); + goto err; + } + get_file(migf->filp); + + ret = xe_sriov_vfio_stop_copy_enter(xe_vdev->xe, xe_vdev->vfid); + if (ret) { + fput(migf->filp); + goto err; + } + + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) { + if (xe_vdev->migf) { + fput(xe_vdev->migf->filp); + xe_vfio_pci_disable_file(xe_vdev->migf); + } + + ret = xe_sriov_vfio_stop_copy_exit(xe_vdev->xe, xe_vdev->vfid); + if (ret) + goto err; + + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct xe_vfio_pci_migration_file *migf; + + migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_RESUME); + if (IS_ERR(migf)) { + ret = PTR_ERR(migf); + goto err; + } + get_file(migf->filp); + + ret = xe_sriov_vfio_resume_data_enter(xe_vdev->xe, xe_vdev->vfid); + if (ret) { + fput(migf->filp); + goto err; + } + + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + if (xe_vdev->migf) { + fput(xe_vdev->migf->filp); + xe_vfio_pci_disable_file(xe_vdev->migf); + } + + ret = xe_sriov_vfio_resume_data_exit(xe_vdev->xe, xe_vdev->vfid); + if (ret) + goto err; + + return NULL; + } + + WARN(true, "Unknown state transition %d->%d", cur, new); + return ERR_PTR(-EINVAL); + +err: + dev_dbg(xe_vdev_to_dev(xe_vdev), + "Failed to transition state: %s->%s err=%d\n", + vfio_dev_state_str(cur), vfio_dev_state_str(new), ret); + return ERR_PTR(ret); +} + +static struct file * +xe_vfio_pci_set_device_state(struct vfio_device *core_vdev, + enum vfio_device_mig_state new_state) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev); + enum vfio_device_mig_state 
next_state; + struct file *f = NULL; + int ret; + + xe_vfio_pci_state_mutex_lock(xe_vdev); + while (new_state != xe_vdev->mig_state) { + ret = vfio_mig_get_next_state(core_vdev, xe_vdev->mig_state, + new_state, &next_state); + if (ret) { + xe_sriov_vfio_error(xe_vdev->xe, xe_vdev->vfid); + f = ERR_PTR(ret); + break; + } + f = xe_vfio_set_state(xe_vdev, next_state); + if (IS_ERR(f)) + break; + + xe_vdev->mig_state = next_state; + + /* Multiple state transitions with non-NULL file in the middle */ + if (f && new_state != xe_vdev->mig_state) { + fput(f); + f = ERR_PTR(-EINVAL); + break; + } + } + xe_vfio_pci_state_mutex_unlock(xe_vdev); + + return f; +} + +static int xe_vfio_pci_get_device_state(struct vfio_device *core_vdev, + enum vfio_device_mig_state *curr_state) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev); + + xe_vfio_pci_state_mutex_lock(xe_vdev); + *curr_state = xe_vdev->mig_state; + xe_vfio_pci_state_mutex_unlock(xe_vdev); + + return 0; +} + +static int xe_vfio_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(vdev, struct xe_vfio_pci_core_device, core_device.vdev); + + xe_vfio_pci_state_mutex_lock(xe_vdev); + *stop_copy_length = xe_sriov_vfio_stop_copy_size(xe_vdev->xe, xe_vdev->vfid); + xe_vfio_pci_state_mutex_unlock(xe_vdev); + + return 0; +} + +static const struct vfio_migration_ops xe_vfio_pci_migration_ops = { + .migration_set_state = xe_vfio_pci_set_device_state, + .migration_get_state = xe_vfio_pci_get_device_state, + .migration_get_data_size = xe_vfio_pci_get_data_size, +}; + +static void xe_vfio_pci_migration_init(struct xe_vfio_pci_core_device *xe_vdev) +{ + struct vfio_device *core_vdev = &xe_vdev->core_device.vdev; + struct pci_dev *pdev = to_pci_dev(core_vdev->dev); + struct xe_device *xe = xe_sriov_vfio_get_xe_device(pdev); + int ret; + + if (!xe) + return; + if (!xe_sriov_vfio_migration_supported(xe)) + return; + + ret = pci_iov_vf_id(pdev); + if (ret < 0) + return; + + mutex_init(&xe_vdev->state_mutex); + spin_lock_init(&xe_vdev->reset_lock); + + /* Xe HW/FW components use vfid indexing starting from 1 */ + xe_vdev->vfid = ret + 1; + xe_vdev->xe = xe; + xe_vdev->migrate_cap = true; + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P; + core_vdev->mig_ops = &xe_vfio_pci_migration_ops; +} + +static void xe_vfio_pci_migration_fini(struct xe_vfio_pci_core_device *xe_vdev) +{ + if (!xe_vdev->migrate_cap) + return; + + mutex_destroy(&xe_vdev->state_mutex); +} + +static int xe_vfio_pci_init_dev(struct vfio_device *core_vdev) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev); + + xe_vfio_pci_migration_init(xe_vdev); + + return vfio_pci_core_init_dev(core_vdev); +} + +static void xe_vfio_pci_release_dev(struct vfio_device *core_vdev) +{ + struct xe_vfio_pci_core_device *xe_vdev = + container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev); + + xe_vfio_pci_migration_fini(xe_vdev); +} + +static const struct vfio_device_ops xe_vfio_pci_ops = { + .name = "xe-vfio-pci", + .init = xe_vfio_pci_init_dev, + .release = xe_vfio_pci_release_dev, + .open_device = xe_vfio_pci_open_device, + .close_device = vfio_pci_core_close_device, + .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + 
.request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .match_token_uuid = vfio_pci_core_match_token_uuid, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static int xe_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct xe_vfio_pci_core_device *xe_vdev; + int ret; + + xe_vdev = vfio_alloc_device(xe_vfio_pci_core_device, core_device.vdev, &pdev->dev, + &xe_vfio_pci_ops); + if (IS_ERR(xe_vdev)) + return PTR_ERR(xe_vdev); + + dev_set_drvdata(&pdev->dev, &xe_vdev->core_device); + + ret = vfio_pci_core_register_device(&xe_vdev->core_device); + if (ret) { + vfio_put_device(&xe_vdev->core_device.vdev); + return ret; + } + + return 0; +} + +static void xe_vfio_pci_remove(struct pci_dev *pdev) +{ + struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev); + + vfio_pci_core_unregister_device(&xe_vdev->core_device); + vfio_put_device(&xe_vdev->core_device.vdev); +} + +static const struct pci_device_id xe_vfio_pci_table[] = { + INTEL_PTL_IDS(INTEL_VGA_VFIO_DEVICE, NULL), + INTEL_WCL_IDS(INTEL_VGA_VFIO_DEVICE, NULL), + INTEL_BMG_IDS(INTEL_VGA_VFIO_DEVICE, NULL), + {} +}; +MODULE_DEVICE_TABLE(pci, xe_vfio_pci_table); + +static struct pci_driver xe_vfio_pci_driver = { + .name = "xe-vfio-pci", + .id_table = xe_vfio_pci_table, + .probe = xe_vfio_pci_probe, + .remove = xe_vfio_pci_remove, + .err_handler = &xe_vfio_pci_err_handlers, + .driver_managed_dma = true, +}; +module_pci_driver(xe_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michał Winiarski "); +MODULE_DESCRIPTION("VFIO PCI driver with migration support for Intel Graphics"); -- 2.50.1
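
A minimal userspace sketch of the save (stop-copy) side of the uAPI implemented by the xe-vfio-pci patch above, assuming the VF has already been bound to xe-vfio-pci (e.g. via the sysfs driver_override mechanism that the PCI_ID_F_VFIO_DRIVER_OVERRIDE match enables) and is opened through the VFIO cdev. The device path is only an example and error handling is reduced to early returns; this is not part of the series itself.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Request a device_state transition; returns 0 and optionally a data fd. */
static int set_mig_state(int device_fd, __u32 state, int *data_fd)
{
	__u8 buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_mig_state)] = { 0 };
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_mig_state *mig = (void *)feat->data;

	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = state;
	mig->data_fd = -1;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat))
		return -1;

	if (data_fd)
		*data_fd = mig->data_fd;

	return 0;
}

int main(int argc, char **argv)
{
	/* Example path; the fd normally comes from the VMM's VFIO setup. */
	int device_fd = open(argc > 1 ? argv[1] : "/dev/vfio/devices/vfio0", O_RDWR);
	char buf[4096];
	int save_fd = -1;
	ssize_t n;

	if (device_fd < 0)
		return 1;

	/* The driver internally walks RUNNING -> RUNNING_P2P -> STOP. */
	if (set_mig_state(device_fd, VFIO_DEVICE_STATE_STOP, NULL))
		return 1;

	/* STOP -> STOP_COPY returns the fd served by xe_vfio_pci_save_read(). */
	if (set_mig_state(device_fd, VFIO_DEVICE_STATE_STOP_COPY, &save_fd) || save_fd < 0)
		return 1;

	/* Drain the migration stream; read() returning 0 means no more data. */
	while ((n = read(save_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(save_fd);
	close(device_fd);

	return n < 0;
}

The resume side on the target is symmetric: a STOP -> RESUMING transition yields a write-only fd backed by xe_vfio_pci_resume_write(), the previously saved stream is written into it, and the device is then moved back through STOP to RUNNING.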