Add debugfs handlers for migration data and implement the bitstream .read()/.write() handlers that convert between the bitstream and migration data packets. As the descriptor and trailer are handled at this layer, add handling for both the save and the restore side.

Signed-off-by: Michał Winiarski
---
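Reviewer note (ignored by git am): a rough sketch of how userspace could
drive this interface, assuming the debugfs layout documented in
xe_sriov_pf_debugfs.c below. The BDF, the "1" written to the
save/restore control files, the ordering of the restore trigger
relative to the data transfer, and the helper names are illustrative
assumptions only, not part of this patch:

/* Hypothetical userspace sketch; error handling trimmed. */
#include <fcntl.h>
#include <unistd.h>

#define VF_DIR "/sys/kernel/debug/dri/0000:03:00.0/sriov/vf1/"

static int save_vf_stream(int out_fd)
{
	char buf[4096];
	int ctl = open(VF_DIR "save", O_WRONLY);
	int data = open(VF_DIR "migration_data", O_RDONLY);
	ssize_t n;

	write(ctl, "1", 1);	/* trigger save (trigger value is an assumption) */

	/* Drain descriptor, resource packets and trailer; read() returns 0
	 * once all GTs have signalled end-of-stream.
	 */
	while ((n = read(data, buf, sizeof(buf))) > 0)
		write(out_fd, buf, n);

	close(data);
	close(ctl);
	return n < 0 ? -1 : 0;
}

static int restore_vf_stream(int in_fd)
{
	char buf[4096];
	int ctl = open(VF_DIR "restore", O_WRONLY);
	int data = open(VF_DIR "migration_data", O_WRONLY);
	ssize_t n;

	/* Chunk size is arbitrary - the .write() handler reassembles
	 * headers and payloads internally.
	 */
	while ((n = read(in_fd, buf, sizeof(buf))) > 0)
		if (write(data, buf, n) != n)
			break;

	write(ctl, "1", 1);	/* trigger restore (ordering is an assumption) */

	close(data);
	close(ctl);
	return n < 0 ? -1 : 0;
}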
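For reference, the resulting bitstream is a flat sequence of packets,
each consisting of a header immediately followed by its payload. The
shape below is inferred from the descriptor/trailer handling added here
(4-byte descriptor payload, empty trailer); the header layout itself
comes from the earlier migration data patches:

  descriptor: hdr { tile = 0, gt = 0, type = DESCRIPTOR, offset = 0, size = 4 } + 4 bytes of payload
  resources:  hdr { tile, gt, type, offset, size } + size bytes of payload
  trailer:    hdr { tile = 0, gt = 0, type = TRAILER, offset = 0, size = 0 }, no payload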
"xe_sriov_pf_migration_data.h" #include "xe_sriov_printk.h" /** @@ -165,6 +166,10 @@ int xe_sriov_pf_control_save_vf(struct xe_device *xe, unsigned int vfid) unsigned int id; int ret; + ret = xe_sriov_pf_migration_data_save_init(xe, vfid); + if (ret) + return ret; + for_each_gt(gt, xe, id) { ret = xe_gt_sriov_pf_control_save_vf(gt, vfid); if (ret) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c index 74eeabef91c57..ce780719760a6 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c @@ -13,6 +13,7 @@ #include "xe_sriov_pf_control.h" #include "xe_sriov_pf_debugfs.h" #include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_migration_data.h" #include "xe_sriov_pf_service.h" #include "xe_sriov_printk.h" #include "xe_tile_sriov_pf_debugfs.h" @@ -71,6 +72,7 @@ static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent) * /sys/kernel/debug/dri/BDF/ * ├── sriov * │ ├── vf1 + * │ │ ├── migration_data * │ │ ├── pause * │ │ ├── reset * │ │ ├── resume @@ -159,6 +161,48 @@ DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf); DEFINE_VF_RW_CONTROL_ATTRIBUTE(save_vf); DEFINE_VF_RW_CONTROL_ATTRIBUTE(restore_vf); +static ssize_t data_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *vfdentry = dent->d_parent; + struct dentry *migration_dentry = vfdentry->d_parent; + unsigned int vfid = (uintptr_t)vfdentry->d_inode->i_private; + struct xe_device *xe = migration_dentry->d_inode->i_private; + + xe_assert(xe, vfid); + xe_sriov_pf_assert_vfid(xe, vfid); + + if (*pos) + return -ESPIPE; + + return xe_sriov_pf_migration_data_write(xe, vfid, buf, count); +} + +static ssize_t data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *vfdentry = dent->d_parent; + struct dentry *migration_dentry = vfdentry->d_parent; + unsigned int vfid = (uintptr_t)vfdentry->d_inode->i_private; + struct xe_device *xe = migration_dentry->d_inode->i_private; + + xe_assert(xe, vfid); + xe_sriov_pf_assert_vfid(xe, vfid); + + if (*ppos) + return -ESPIPE; + + return xe_sriov_pf_migration_data_read(xe, vfid, buf, count); +} + +static const struct file_operations data_vf_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, + .llseek = default_llseek, +}; + static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) { debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops); @@ -167,6 +211,7 @@ static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent) debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops); debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops); debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops); + debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops); } static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c index d39cee66589b5..9cc178126cbdc 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c @@ -56,6 +56,18 @@ static bool pf_check_migration_support(struct xe_device *xe) return IS_ENABLED(CONFIG_DRM_XE_DEBUG); } +static void pf_migration_cleanup(struct drm_device *dev, void *arg) +{ + struct xe_sriov_pf_migration *migration = arg; + + if 
+	if (!IS_ERR_OR_NULL(migration->pending))
+		xe_sriov_pf_migration_data_free(migration->pending);
+	if (!IS_ERR_OR_NULL(migration->trailer))
+		xe_sriov_pf_migration_data_free(migration->trailer);
+	if (!IS_ERR_OR_NULL(migration->descriptor))
+		xe_sriov_pf_migration_data_free(migration->descriptor);
+}
+
 /**
  * xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration.
  * @xe: the &struct xe_device
@@ -65,6 +77,7 @@ static bool pf_check_migration_support(struct xe_device *xe)
 int xe_sriov_pf_migration_init(struct xe_device *xe)
 {
 	unsigned int n, totalvfs;
+	int err;
 
 	xe_assert(xe, IS_SRIOV_PF(xe));
 
@@ -76,7 +89,15 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)
 	for (n = 1; n <= totalvfs; n++) {
 		struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, n);
 
+		err = drmm_mutex_init(&xe->drm, &migration->lock);
+		if (err)
+			return err;
+
 		init_waitqueue_head(&migration->wq);
+
+		err = drmm_add_action_or_reset(&xe->drm, pf_migration_cleanup, migration);
+		if (err)
+			return err;
 	}
 
 	return 0;
@@ -162,6 +183,36 @@ xe_sriov_pf_migration_consume(struct xe_device *xe, unsigned int vfid)
 	return data;
 }
 
+static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid,
+				struct xe_sriov_pf_migration_data *data)
+{
+	if (data->tile != 0 || data->gt != 0)
+		return -EINVAL;
+
+	xe_sriov_pf_migration_data_free(data);
+
+	return 0;
+}
+
+static int pf_handle_trailer(struct xe_device *xe, unsigned int vfid,
+			     struct xe_sriov_pf_migration_data *data)
+{
+	struct xe_gt *gt;
+	u8 gt_id;
+
+	if (data->tile != 0 || data->gt != 0)
+		return -EINVAL;
+	if (data->offset != 0 || data->size != 0 || data->buff || data->bo)
+		return -EINVAL;
+
+	xe_sriov_pf_migration_data_free(data);
+
+	for_each_gt(gt, xe, gt_id)
+		xe_gt_sriov_pf_control_vf_data_eof(gt, vfid);
+
+	return 0;
+}
+
 /**
  * xe_sriov_pf_migration_produce() - Produce a SR-IOV VF migration data packet for device to process
  * @xe: the &struct xe_device
@@ -180,6 +231,11 @@ int xe_sriov_pf_migration_produce(struct xe_device *xe, unsigned int vfid,
 	if (!IS_SRIOV_PF(xe))
 		return -ENODEV;
 
+	if (data->type == XE_SRIOV_MIG_DATA_DESCRIPTOR)
+		return pf_handle_descriptor(xe, vfid, data);
+	else if (data->type == XE_SRIOV_MIG_DATA_TRAILER)
+		return pf_handle_trailer(xe, vfid, data);
+
 	gt = xe_device_get_gt(xe, data->gt);
 	if (!gt || data->tile != gt->tile->id) {
 		xe_sriov_err_ratelimited(xe, "VF%d Unknown GT - tile_id:%d, gt_id:%d\n",
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
index cfc6b512c6674..9a2777dcf9a6b 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.c
@@ -5,7 +5,45 @@
 
 #include "xe_bo.h"
 #include "xe_device.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
 #include "xe_sriov_pf_migration_data.h"
+#include "xe_sriov_printk.h"
+
+static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	return &xe->sriov.pf.vfs[vfid].migration.lock;
+}
+
+static struct xe_sriov_pf_migration_data **pf_pick_pending(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.pending;
+}
+
+static struct xe_sriov_pf_migration_data **
+pf_pick_descriptor(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.descriptor;
+}
+
+static struct xe_sriov_pf_migration_data **pf_pick_trailer(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.trailer;
+}
 
 static bool data_needs_bo(struct xe_sriov_pf_migration_data *data)
 {
@@ -133,3 +171,319 @@ int xe_sriov_pf_migration_data_init_from_hdr(struct xe_sriov_pf_migration_data *
 
 	return mig_data_init(data);
 }
+
+static ssize_t vf_mig_data_hdr_read(struct xe_sriov_pf_migration_data *data,
+				    char __user *buf, size_t len)
+{
+	loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+
+	if (!data->hdr_remaining)
+		return -EINVAL;
+
+	if (len > data->hdr_remaining)
+		len = data->hdr_remaining;
+
+	if (copy_to_user(buf, (void *)&data->hdr + offset, len))
+		return -EFAULT;
+
+	data->hdr_remaining -= len;
+
+	return len;
+}
+
+static ssize_t vf_mig_data_read(struct xe_sriov_pf_migration_data *data,
+				char __user *buf, size_t len)
+{
+	if (len > data->remaining)
+		len = data->remaining;
+
+	if (copy_to_user(buf, data->vaddr + (data->size - data->remaining), len))
+		return -EFAULT;
+
+	data->remaining -= len;
+
+	return len;
+}
+
+static ssize_t __vf_mig_data_read_single(struct xe_sriov_pf_migration_data **data,
+					 unsigned int vfid, char __user *buf, size_t len)
+{
+	ssize_t copied = 0;
+
+	if ((*data)->hdr_remaining)
+		copied = vf_mig_data_hdr_read(*data, buf, len);
+	else
+		copied = vf_mig_data_read(*data, buf, len);
+
+	if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) {
+		xe_sriov_pf_migration_data_free(*data);
+		*data = NULL;
+	}
+
+	return copied;
+}
+
+static struct xe_sriov_pf_migration_data **vf_mig_pick_data(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration_data **data;
+
+	data = pf_pick_descriptor(xe, vfid);
+	if (*data)
+		return data;
+
+	data = pf_pick_pending(xe, vfid);
+	if (*data == NULL)
+		*data = xe_sriov_pf_migration_consume(xe, vfid);
+	if (!IS_ERR_OR_NULL(*data))
+		return data;
+	else if (IS_ERR(*data) && PTR_ERR(*data) != -ENODATA)
+		return data;
+
+	data = pf_pick_trailer(xe, vfid);
+	if (*data)
+		return data;
+
+	return ERR_PTR(-ENODATA);
+}
+
+static ssize_t vf_mig_data_read_single(struct xe_device *xe, unsigned int vfid,
+				       char __user *buf, size_t len)
+{
+	struct xe_sriov_pf_migration_data **data = vf_mig_pick_data(xe, vfid);
+
+	if (IS_ERR_OR_NULL(data))
+		return PTR_ERR(data);
+
+	return __vf_mig_data_read_single(data, vfid, buf, len);
+}
+
+/**
+ * xe_sriov_pf_migration_data_read() - Read migration data from the device
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ *         0 if no more migration data is available,
+ *         -errno on failure.
+ */
+ssize_t xe_sriov_pf_migration_data_read(struct xe_device *xe, unsigned int vfid,
+					char __user *buf, size_t len)
+{
+	ssize_t ret, consumed = 0;
+
+	xe_assert(xe, IS_SRIOV_PF(xe));
+
+	ret = mutex_lock_interruptible(pf_migration_mutex(xe, vfid));
+	if (ret)
+		return ret;
+
+	while (consumed < len) {
+		ret = vf_mig_data_read_single(xe, vfid, buf, len - consumed);
+		if (ret == -ENODATA)
+			goto out;
+		if (ret < 0) {
+			mutex_unlock(pf_migration_mutex(xe, vfid));
+			return ret;
+		}
+
+		consumed += ret;
+		buf += ret;
+	}
+
+out:
+	mutex_unlock(pf_migration_mutex(xe, vfid));
+	return consumed;
+}
+
+static ssize_t vf_mig_hdr_write(struct xe_sriov_pf_migration_data *data,
+				const char __user *buf, size_t len)
+{
+	loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+	int ret;
+
+	if (WARN_ON(!data->hdr_remaining))
+		return -EINVAL;
+
+	if (len > data->hdr_remaining)
+		len = data->hdr_remaining;
+
+	if (copy_from_user((void *)&data->hdr + offset, buf, len))
+		return -EFAULT;
+
+	data->hdr_remaining -= len;
+
+	if (!data->hdr_remaining) {
+		ret = xe_sriov_pf_migration_data_init_from_hdr(data);
+		if (ret)
+			return ret;
+	}
+
+	return len;
+}
+
+static ssize_t vf_mig_data_write(struct xe_sriov_pf_migration_data *data,
+				 const char __user *buf, size_t len)
+{
+	if (len > data->remaining)
+		len = data->remaining;
+
+	if (copy_from_user(data->vaddr + (data->size - data->remaining), buf, len))
+		return -EFAULT;
+
+	data->remaining -= len;
+
+	return len;
+}
+
+static ssize_t vf_mig_data_write_single(struct xe_device *xe, unsigned int vfid,
+					const char __user *buf, size_t len)
+{
+	struct xe_sriov_pf_migration_data **data = pf_pick_pending(xe, vfid);
+	ssize_t copied;
+	int ret;
+
+	if (IS_ERR_OR_NULL(*data)) {
+		*data = xe_sriov_pf_migration_data_alloc(xe);
+		if (*data == NULL)
+			return -ENOMEM;
+	}
+
+	if ((*data)->hdr_remaining)
+		copied = vf_mig_hdr_write(*data, buf, len);
+	else
+		copied = vf_mig_data_write(*data, buf, len);
+
+	if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) {
+		ret = xe_sriov_pf_migration_produce(xe, vfid, *data);
+		if (ret) {
+			xe_sriov_pf_migration_data_free(*data);
+			*data = NULL;
+			return ret;
+		}
+
+		*data = NULL;
+	}
+
+	return copied;
+}
+
+/**
+ * xe_sriov_pf_migration_data_write() - Write migration data to the device
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written,
+ *         -errno on failure.
+ */
+ssize_t xe_sriov_pf_migration_data_write(struct xe_device *xe, unsigned int vfid,
+					 const char __user *buf, size_t len)
+{
+	ssize_t ret, produced = 0;
+
+	xe_assert(xe, IS_SRIOV_PF(xe));
+
+	ret = mutex_lock_interruptible(pf_migration_mutex(xe, vfid));
+	if (ret)
+		return ret;
+
+	while (produced < len) {
+		ret = vf_mig_data_write_single(xe, vfid, buf, len - produced);
+		if (ret < 0) {
+			mutex_unlock(pf_migration_mutex(xe, vfid));
+			return ret;
+		}
+
+		produced += ret;
+		buf += ret;
+	}
+
+	mutex_unlock(pf_migration_mutex(xe, vfid));
+	return produced;
+}
+
+#define MIGRATION_DESC_SIZE 4
+static int pf_desc_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration_data **desc = pf_pick_descriptor(xe, vfid);
+	struct xe_sriov_pf_migration_data *data;
+	int ret;
+
+	data = xe_sriov_pf_migration_data_alloc(xe);
+	if (!data)
+		return -ENOMEM;
+
+	ret = xe_sriov_pf_migration_data_init(data, 0, 0, XE_SRIOV_MIG_DATA_DESCRIPTOR,
+					      0, MIGRATION_DESC_SIZE);
+	if (ret) {
+		xe_sriov_pf_migration_data_free(data);
+		return ret;
+	}
+
+	*desc = data;
+
+	return 0;
+}
+
+static void pf_pending_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration_data **data = pf_pick_pending(xe, vfid);
+
+	*data = NULL;
+}
+
+#define MIGRATION_TRAILER_SIZE 0
+static int pf_trailer_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration_data **trailer = pf_pick_trailer(xe, vfid);
+	struct xe_sriov_pf_migration_data *data;
+	int ret;
+
+	data = xe_sriov_pf_migration_data_alloc(xe);
+	if (!data)
+		return -ENOMEM;
+
+	ret = xe_sriov_pf_migration_data_init(data, 0, 0, XE_SRIOV_MIG_DATA_TRAILER,
+					      0, MIGRATION_TRAILER_SIZE);
+	if (ret) {
+		xe_sriov_pf_migration_data_free(data);
+		return ret;
+	}
+
+	*trailer = data;
+
+	return 0;
+}
+
+/**
+ * xe_sriov_pf_migration_data_save_init() - Initialize the pending save migration data.
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int xe_sriov_pf_migration_data_save_init(struct xe_device *xe, unsigned int vfid)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(pf_migration_mutex(xe, vfid));
+	if (ret)
+		return ret;
+
+	ret = pf_desc_init(xe, vfid);
+	if (ret)
+		goto out;
+
+	ret = pf_trailer_init(xe, vfid);
+	if (ret)
+		goto out;
+
+	pf_pending_init(xe, vfid);
+
+out:
+	mutex_unlock(pf_migration_mutex(xe, vfid));
+	return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h
index 1dde4cfcdbc47..5b96c7f224002 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_data.h
@@ -28,5 +28,10 @@ void xe_sriov_pf_migration_data_free(struct xe_sriov_pf_migration_data *snapshot
 int xe_sriov_pf_migration_data_init(struct xe_sriov_pf_migration_data *data,
 				    u8 tile_id, u8 gt_id, unsigned int type, loff_t offset, size_t size);
 int xe_sriov_pf_migration_data_init_from_hdr(struct xe_sriov_pf_migration_data *snapshot);
+ssize_t xe_sriov_pf_migration_data_read(struct xe_device *xe, unsigned int vfid,
+					char __user *buf, size_t len);
+ssize_t xe_sriov_pf_migration_data_write(struct xe_device *xe, unsigned int vfid,
+					 const char __user *buf, size_t len);
+int xe_sriov_pf_migration_data_save_init(struct xe_device *xe, unsigned int vfid);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
index 80fdea32b884a..c5d75bb7f39c0 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
@@ -7,6 +7,7 @@
 #define _XE_SRIOV_PF_MIGRATION_TYPES_H_
 
 #include <linux/kfifo.h>
+#include <linux/mutex.h>
 #include <linux/wait.h>
 
 struct xe_sriov_pf_migration_data {
@@ -32,6 +33,14 @@ struct xe_sriov_pf_migration_data {
 struct xe_sriov_pf_migration {
 	/** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */
 	wait_queue_head_t wq;
+	/** @lock: mutex protecting the migration data */
+	struct mutex lock;
+	/** @pending: migration data packet of a VF resource that is currently being processed */
+	struct xe_sriov_pf_migration_data *pending;
+	/** @trailer: data packet used to indicate the end of stream */
+	struct xe_sriov_pf_migration_data *trailer;
+	/** @descriptor: data packet containing the metadata describing the device */
+	struct xe_sriov_pf_migration_data *descriptor;
 };
 
 #endif
-- 
2.50.1