The calculation of the upper limit for queues does not depend solely on the
number of online CPUs; for example, the isolcpus kernel command-line option
must also be considered. To account for this, the block layer provides a
helper function to retrieve the maximum number of queues. Use it to set an
appropriate upper queue number limit.

Fixes: 94970cfb5f10 ("scsi: use block layer helpers to calculate num of queues")
Signed-off-by: Daniel Wagner
---
 drivers/scsi/aacraid/comminit.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 726c8531b7d3fbff4cc7b6a7ac4891f7bcb1c12f..788d7bf0a2d371fd3b38d88b0a9d76937f37d28b 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -469,8 +469,7 @@ void aac_define_int_mode(struct aac_dev *dev)
 	}
 
 	/* Don't bother allocating more MSI-X vectors than cpus */
-	msi_count = min(dev->max_msix,
-			(unsigned int)num_online_cpus());
+	msi_count = blk_mq_num_online_queues(dev->max_msix);
 	dev->max_msix = msi_count;
-- 
2.51.0

The support for the !SMP configuration has been removed from the core by
commit cac5cefbade9 ("sched/smp: Make SMP unconditional"). Remove the
now-dead !SMP variant of group_cpus_evenly() as well.

Signed-off-by: Daniel Wagner
---
 lib/group_cpus.c | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index 6d08ac05f371bf880571507d935d9eb501616a84..f254b232522d44c141cdc4e44e2c99a4148c08d6 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -9,8 +9,6 @@
 #include
 #include
 
-#ifdef CONFIG_SMP
-
 static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 				unsigned int cpus_per_grp)
 {
@@ -425,22 +423,4 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
 	*nummasks = min(nr_present + nr_others, numgrps);
 	return masks;
 }
-#else /* CONFIG_SMP */
-struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
-{
-	struct cpumask *masks;
-
-	if (numgrps == 0)
-		return NULL;
-
-	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
-	if (!masks)
-		return NULL;
-
-	/* assign all CPUs(cpu 0) to the 1st group only */
-	cpumask_copy(&masks[0], cpu_possible_mask);
-	*nummasks = 1;
-	return masks;
-}
-#endif /* CONFIG_SMP */
 EXPORT_SYMBOL_GPL(group_cpus_evenly);
-- 
2.51.0

group_mask_cpus_evenly() allows the caller to pass in a CPU mask whose CPUs
should be evenly distributed into groups. This new function is a more
generic version of the existing group_cpus_evenly(), which always
distributes all present CPUs into groups.
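
For illustration, a minimal usage sketch of the new helper (the group count
and the printing are made up for this example and are not part of the patch):

	unsigned int i, nr_masks;
	struct cpumask *masks;

	/* Spread the online CPUs into up to four groups. */
	masks = group_mask_cpus_evenly(4, cpu_online_mask, &nr_masks);
	if (!masks)
		return -ENOMEM;

	/* nr_masks may be smaller than the requested number of groups. */
	for (i = 0; i < nr_masks; i++)
		pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

	kfree(masks);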
Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 include/linux/group_cpus.h | 3 +++
 lib/group_cpus.c | 59 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
index 9d4e5ab6c314b31c09fda82c3f6ac18f77e9de36..defab4123a82fa37cb2a9920029be8e3e121ca0d 100644
--- a/include/linux/group_cpus.h
+++ b/include/linux/group_cpus.h
@@ -10,5 +10,8 @@
 #include
 
 struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+				       const struct cpumask *mask,
+				       unsigned int *nummasks);
 
 #endif

diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index f254b232522d44c141cdc4e44e2c99a4148c08d6..ec0852132266618f540c580422f254684129ce90 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 
 static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 				unsigned int cpus_per_grp)
@@ -424,3 +425,61 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
 	return masks;
 }
 EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+/**
+ * group_mask_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of cpumasks to create
+ * @mask: CPUs to consider for the grouping
+ * @nummasks: number of initialized cpumasks
+ *
+ * Return: cpumask array if successful, NULL otherwise. Only the CPUs
+ * marked in the mask will be considered for the grouping. And each
+ * element includes CPUs assigned to this group. nummasks contains the
+ * number of initialized masks which can be less than numgrps.
+ *
+ * Try to put close CPUs from viewpoint of CPU and NUMA locality into
+ * same group, and run two-stage grouping:
+ *	1) allocate present CPUs on these groups evenly first
+ *	2) allocate other possible CPUs on these groups evenly
+ *
+ * We guarantee in the resulting grouping that all CPUs are covered, and
+ * no CPU is assigned to multiple groups.
+ */
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+				       const struct cpumask *mask,
+				       unsigned int *nummasks)
+{
+	cpumask_var_t *node_to_cpumask;
+	cpumask_var_t nmsk;
+	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto fail_nmsk;
+
+	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
+	build_node_to_cpumask(node_to_cpumask);
+
+	ret = __group_cpus_evenly(0, numgrps, node_to_cpumask, mask, nmsk,
+				  masks);
+
+fail_node_to_cpumask:
+	free_node_to_cpumask(node_to_cpumask);
+
+fail_nmsk:
+	free_cpumask_var(nmsk);
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	*nummasks = ret;
+	return masks;
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);
-- 
2.51.0

Pass a cpumask to irq_create_affinity_masks() as an additional constraint
to consider when creating the affinity masks. This allows the caller to
exclude specific CPUs, e.g., isolated CPUs (see the 'isolcpus' kernel
command-line parameter).
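
As an illustration of the intended use, a hedged sketch of a caller that
constrains the spreading (minvec/maxvec and the chosen housekeeping mask are
placeholders, not taken from this patch):

	struct irq_affinity affd = {
		.pre_vectors	= 1,
		/* Only spread across the housekeeping CPUs. */
		.mask		= housekeeping_cpumask(HK_TYPE_MANAGED_IRQ),
	};
	unsigned int nvec;

	/* With .mask set, the result is bounded by cpumask_weight(affd.mask). */
	nvec = irq_calc_affinity_vectors(minvec, maxvec, &affd);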
Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 include/linux/interrupt.h | 16 ++++++++++------
 kernel/irq/affinity.c | 12 ++++++++++--
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 51b6484c049345c75816c4a63b4efa813f42f27b..b1a230953514da57e30e601727cd0e94796153d3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -284,18 +284,22 @@ struct irq_affinity_notify {
  * @nr_sets:		The number of interrupt sets for which affinity
  *			spreading is required
  * @set_size:		Array holding the size of each interrupt set
+ * @mask:		cpumask that constrains which CPUs to consider when
+ *			calculating the number and size of the interrupt sets
  * @calc_sets:		Callback for calculating the number and size
  *			of interrupt sets
  * @priv:		Private data for usage by @calc_sets, usually a
  *			pointer to driver/device specific data.
  */
 struct irq_affinity {
-	unsigned int	pre_vectors;
-	unsigned int	post_vectors;
-	unsigned int	nr_sets;
-	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
-	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
-	void		*priv;
+	unsigned int		pre_vectors;
+	unsigned int		post_vectors;
+	unsigned int		nr_sets;
+	unsigned int		set_size[IRQ_AFFINITY_MAX_SETS];
+	const struct cpumask	*mask;
+	void			(*calc_sets)(struct irq_affinity *,
+					     unsigned int nvecs);
+	void			*priv;
 };

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4013e6ad2b2f1cb91de12bb428b3281105f7d23b..c68156f7847a7920103e39124676d06191304ef6 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -70,7 +70,13 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 	 */
 	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
 		unsigned int nr_masks, this_vecs = affd->set_size[i];
-		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
+		struct cpumask *result;
+
+		if (affd->mask)
+			result = group_mask_cpus_evenly(this_vecs, affd->mask,
+							&nr_masks);
+		else
+			result = group_cpus_evenly(this_vecs, &nr_masks);
 
 		if (!result) {
 			kfree(masks);
@@ -115,7 +121,9 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 	if (resv > minvec)
 		return 0;
 
-	if (affd->calc_sets) {
+	if (affd->mask) {
+		set_vecs = cpumask_weight(affd->mask);
+	} else if (affd->calc_sets) {
 		set_vecs = maxvec - resv;
 	} else {
 		cpus_read_lock();
-- 
2.51.0

Introduce blk_mq_{online|possible}_queue_affinity(), which returns the
queue-to-CPU mapping constraints defined by the block layer. This allows
other subsystems (e.g., IRQ affinity setup) to respect block layer
requirements.

It is necessary to provide versions for both the online and possible CPU
masks because some drivers want to spread their I/O queues only across
online CPUs, while others prefer to use all possible CPUs. The mask used
must match the number of queues requested (see
blk_mq_num_{online|possible}_queues()).
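
For illustration, the intended pairing in a driver looks roughly like this
(max_hw_queues is a placeholder for whatever the hardware supports):

	/*
	 * Derive the queue count and the IRQ spreading constraint from the
	 * same CPU mask (online in this example).
	 */
	unsigned int nr_queues = blk_mq_num_online_queues(max_hw_queues);
	struct irq_affinity desc = {
		.mask = blk_mq_online_queue_affinity(),
	};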
Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 block/blk-mq-cpumap.c | 24 ++++++++++++++++++++++++
 include/linux/blk-mq.h | 2 ++
 2 files changed, 26 insertions(+)

diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 705da074ad6c7e88042296f21b739c6d686a72b6..8244ecf878358c0b8de84458dcd5100c2f360213 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -26,6 +26,30 @@ static unsigned int blk_mq_num_queues(const struct cpumask *mask,
 	return min_not_zero(num, max_queues);
 }
 
+/**
+ * blk_mq_possible_queue_affinity - Return block layer queue affinity
+ *
+ * Returns an affinity mask that represents the queue-to-CPU mapping
+ * requested by the block layer based on possible CPUs.
+ */
+const struct cpumask *blk_mq_possible_queue_affinity(void)
+{
+	return cpu_possible_mask;
+}
+EXPORT_SYMBOL_GPL(blk_mq_possible_queue_affinity);
+
+/**
+ * blk_mq_online_queue_affinity - Return block layer queue affinity
+ *
+ * Returns an affinity mask that represents the queue-to-CPU mapping
+ * requested by the block layer based on online CPUs.
+ */
+const struct cpumask *blk_mq_online_queue_affinity(void)
+{
+	return cpu_online_mask;
+}
+EXPORT_SYMBOL_GPL(blk_mq_online_queue_affinity);
+
 /**
  * blk_mq_num_possible_queues - Calc nr of queues for multiqueue devices
  * @max_queues: The maximum number of queues the hardware/driver

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2a5a828f19a0ba6ff0812daf40eed67f0e12ada1..1144017dce47af82f9d010e42bfbf26fa4ddf33f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -947,6 +947,8 @@ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
 void blk_freeze_queue_start_non_owner(struct request_queue *q);
 
+const struct cpumask *blk_mq_possible_queue_affinity(void);
+const struct cpumask *blk_mq_online_queue_affinity(void);
 unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
 unsigned int blk_mq_num_online_queues(unsigned int max_queues);
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
-- 
2.51.0

Ensure that IRQ affinity setup also respects the queue-to-CPU mapping
constraints provided by the block layer. This allows the NVMe driver to
avoid assigning interrupts to CPUs that the block layer has excluded
(e.g., isolated CPUs).

Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 drivers/nvme/host/pci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2c6d9506b172509fb35716eba456c375f52f5b86..1d9c13aeddb12fa39eef68b7288d1f13eb98a0d7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2604,6 +2604,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		.pre_vectors	= 1,
 		.calc_sets	= nvme_calc_irq_sets,
 		.priv		= dev,
+		.mask		= blk_mq_possible_queue_affinity(),
 	};
 	unsigned int irq_queues, poll_queues;
 	unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
-- 
2.51.0

Ensure that IRQ affinity setup also respects the queue-to-CPU mapping
constraints provided by the block layer. This allows the SCSI drivers to
avoid assigning interrupts to CPUs that the block layer has excluded
(e.g., isolated CPUs).

Only convert drivers that already use pci_alloc_irq_vectors_affinity() with
the PCI_IRQ_AFFINITY flag set, because these drivers already let the IRQ
core code set the affinity. Also, don't update qla2xxx because the
nvme-fabrics code is not ready yet.
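
The conversion follows the same pattern in every driver; a representative,
slightly simplified sketch (the vector counts are placeholders):

	struct irq_affinity desc = {
		.pre_vectors	= reserved_vectors,
		.mask		= blk_mq_online_queue_affinity(),
	};

	nvec = pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);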
Signed-off-by: Daniel Wagner
---
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 1 +
 drivers/scsi/megaraid/megaraid_sas_base.c | 5 ++++-
 drivers/scsi/mpi3mr/mpi3mr_fw.c | 6 +++++-
 drivers/scsi/mpt3sas/mpt3sas_base.c | 5 ++++-
 drivers/scsi/pm8001/pm8001_init.c | 1 +
 5 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2f3d61abab3a66bf0b40a27b9411dc2cab1c44fc..9f3194ac9c0fb53d619e3a108935ef109308d131 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2607,6 +2607,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
 	struct pci_dev *pdev = hisi_hba->pci_dev;
 	struct irq_affinity desc = {
 		.pre_vectors = BASE_VECTORS_V3_HW,
+		.mask = blk_mq_online_queue_affinity(),
 	};
 
 	min_msi = MIN_AFFINE_VECTORS_V3_HW;

diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 615e06fd4ee8e5d1c14ef912460962eacb450c04..c8df2dc47689a5dad02e1364de1d71e24f6159d0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5927,7 +5927,10 @@ static int __megasas_alloc_irq_vectors(struct megasas_instance *instance)
 {
 	int i, irq_flags;
-	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
+	struct irq_affinity desc = {
+		.pre_vectors = instance->low_latency_index_start,
+		.mask = blk_mq_online_queue_affinity(),
+	};
 	struct irq_affinity *descp = &desc;
 
 	irq_flags = PCI_IRQ_MSIX;

diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0152d31d430abd17ab6b71f248435d9c7c417269..a8fbc84e0ab2ed7ca68a3b874ecfa78a8ebf0c47 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -825,7 +825,11 @@ static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
 	int max_vectors, min_vec;
 	int retval;
 	int i;
-	struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+	struct irq_affinity desc = {
+		.pre_vectors = 1,
+		.post_vectors = 1,
+		.mask = blk_mq_online_queue_affinity(),
+	};
 
 	if (mrioc->is_intr_info_set)
 		return 0;

diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index bd3efa5b46c780d43fae58c12f0bce5057dcda85..a55dd75221a6079a29f6ebee402b3654b94411c1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3364,7 +3364,10 @@ static int _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
 {
 	int i, irq_flags = PCI_IRQ_MSIX;
-	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
+	struct irq_affinity desc = {
+		.pre_vectors = ioc->high_iops_queues,
+		.mask = blk_mq_online_queue_affinity(),
+	};
 	struct irq_affinity *descp = &desc;
 	/*
 	 * Don't allocate msix vectors for poll_queues.

diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 599410bcdfea59aba40e3dd6749434b7b5966d48..1d4807eeed75acdfe091a3c0560a926ebb59e1e8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -977,6 +977,7 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 	 */
 	struct irq_affinity desc = {
 		.pre_vectors = 1,
+		.mask = blk_mq_online_queue_affinity(),
 	};
 	rc = pci_alloc_irq_vectors_affinity(
 			pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
-- 
2.51.0

Ensure that IRQ affinity setup also respects the queue-to-CPU mapping
constraints provided by the block layer.
This allows the virtio drivers to avoid assigning interrupts to CPUs that
the block layer has excluded (e.g., isolated CPUs).

Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 drivers/block/virtio_blk.c | 4 +++-
 drivers/scsi/virtio_scsi.c | 5 ++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e649fa67bac16b4f0c6e8e8f0e6bec111897c355..41b06540c7fb22fd1d2708338c514947c4bdeefe 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -963,7 +963,9 @@ static int init_vq(struct virtio_blk *vblk)
 	unsigned short num_vqs;
 	unsigned short num_poll_vqs;
 	struct virtio_device *vdev = vblk->vdev;
-	struct irq_affinity desc = { 0, };
+	struct irq_affinity desc = {
+		.mask = blk_mq_possible_queue_affinity(),
+	};
 
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
 				   struct virtio_blk_config, num_queues,

diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 96a69edddbe5555574fc8fed1ba7c82a99df4472..67dfb265bf9e54adc68978ac8d93187e6629c330 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -842,7 +842,10 @@ static int virtscsi_init(struct virtio_device *vdev,
 	u32 num_vqs, num_poll_vqs, num_req_vqs;
 	struct virtqueue_info *vqs_info;
 	struct virtqueue **vqs;
-	struct irq_affinity desc = { .pre_vectors = 2 };
+	struct irq_affinity desc = {
+		.pre_vectors = 2,
+		.mask = blk_mq_possible_queue_affinity(),
+	};
 
 	num_req_vqs = vscsi->num_queues;
 	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
-- 
2.51.0

Multiqueue drivers spread I/O queues across all CPUs for optimal
performance. However, these drivers are not aware of CPU isolation
requirements and will distribute queues without considering the isolcpus
configuration.

Introduce a new isolcpus mask that allows users to define which CPUs should
have I/O queues assigned. This is similar to managed_irq, but intended for
drivers that do not use the managed IRQ infrastructure.
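
For illustration, a consumer of the new housekeeping type would query it
roughly like this (sketch only; booting with e.g. isolcpus=io_queue,2-3 is
assumed):

	const struct cpumask *hk_mask = cpu_possible_mask;

	/* Fall back to all possible CPUs unless io_queue isolation is set. */
	if (housekeeping_enabled(HK_TYPE_IO_QUEUE))
		hk_mask = housekeeping_cpumask(HK_TYPE_IO_QUEUE);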
Reviewed-by: Hannes Reinecke
Reviewed-by: Aaron Tomlin
Signed-off-by: Daniel Wagner
---
 include/linux/sched/isolation.h | 1 +
 kernel/sched/isolation.c | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index d8501f4709b583b8a1c91574446382f093bccdb1..6b6ae9c5b2f61a93c649a98ea27482b932627fca 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -9,6 +9,7 @@ enum hk_type {
 	HK_TYPE_DOMAIN,
 	HK_TYPE_MANAGED_IRQ,
+	HK_TYPE_IO_QUEUE,
 	HK_TYPE_KERNEL_NOISE,
 
 	HK_TYPE_MAX,

diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index a4cf17b1fab062f536c7f4f47c35f0e209fd25d6..0d59cc95bf3b8fa2f06cb562ce1baf3fdd48c9db 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -13,6 +13,7 @@ enum hk_flags {
 	HK_FLAG_DOMAIN		= BIT(HK_TYPE_DOMAIN),
 	HK_FLAG_MANAGED_IRQ	= BIT(HK_TYPE_MANAGED_IRQ),
+	HK_FLAG_IO_QUEUE	= BIT(HK_TYPE_IO_QUEUE),
 	HK_FLAG_KERNEL_NOISE	= BIT(HK_TYPE_KERNEL_NOISE),
 };
 
@@ -226,6 +227,12 @@ static int __init housekeeping_isolcpus_setup(char *str)
 			continue;
 		}
 
+		if (!strncmp(str, "io_queue,", 9)) {
+			str += 9;
+			flags |= HK_FLAG_IO_QUEUE;
+			continue;
+		}
+
 		/*
 		 * Skip unknown sub-parameter and validate that it is not
 		 * containing an invalid character.
-- 
2.51.0

Extend the generic CPU to hardware queue (hctx) mapping code so that it maps
housekeeping CPUs and isolated CPUs to the hardware queues evenly.

An hctx is only operational when there is at least one online housekeeping
CPU assigned (aka active_hctx). Thus, verify that the final mapping contains
no hctx which has only offline housekeeping CPUs and online isolated CPUs
assigned to it.

Example mapping result for 16 online CPUs and

  isolcpus=io_queue,2-3,6-7,12-13

Queue mapping:
	hctx0: default 0 2
	hctx1: default 1 3
	hctx2: default 4 6
	hctx3: default 5 7
	hctx4: default 8 12
	hctx5: default 9 13
	hctx6: default 10
	hctx7: default 11
	hctx8: default 14
	hctx9: default 15

IRQ mapping:
	irq 42 affinity 0 effective 0 nvme0q0
	irq 43 affinity 0 effective 0 nvme0q1
	irq 44 affinity 1 effective 1 nvme0q2
	irq 45 affinity 4 effective 4 nvme0q3
	irq 46 affinity 5 effective 5 nvme0q4
	irq 47 affinity 8 effective 8 nvme0q5
	irq 48 affinity 9 effective 9 nvme0q6
	irq 49 affinity 10 effective 10 nvme0q7
	irq 50 affinity 11 effective 11 nvme0q8
	irq 51 affinity 14 effective 14 nvme0q9
	irq 52 affinity 15 effective 15 nvme0q10

A corner case arises when the number of online and present CPUs differ and
the driver asks for fewer queues than there are online CPUs, e.g. 8 online
CPUs, 16 possible CPUs and

  isolcpus=io_queue,2-3,6-7,12-13
  virtio_blk.num_request_queues=2

Queue mapping:
	hctx0: default 0 1 2 3 4 5 6 7 8 12 13
	hctx1: default 9 10 11 14 15

IRQ mapping:
	irq 27 affinity 0 effective 0 virtio0-config
	irq 28 affinity 0-1,4-5,8 effective 5 virtio0-req.0
	irq 29 affinity 9-11,14-15 effective 0 virtio0-req.1

Note that for the normal/default configuration (no isolcpus) the mapping
changes on systems with non-hyperthreading CPUs. The main assignment loop
now relies entirely on group_mask_cpus_evenly() to do the right thing. The
old code distributed the CPUs linearly over the hardware contexts:

	queue mapping for /dev/nvme0n1
		hctx0: default 0 8
		hctx1: default 1 9
		hctx2: default 2 10
		hctx3: default 3 11
		hctx4: default 4 12
		hctx5: default 5 13
		hctx6: default 6 14
		hctx7: default 7 15

The new code assigns each hardware context the map generated by
group_mask_cpus_evenly():

	queue mapping for /dev/nvme0n1
		hctx0: default 0 1
		hctx1: default 2 3
		hctx2: default 4 5
		hctx3: default 6 7
		hctx4: default 8 9
		hctx5: default 10 11
		hctx6: default 12 13
		hctx7: default 14 15

For hyperthreading CPUs, the resulting map stays the same.
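
For review purposes, a small debug sketch (not part of this patch) that
prints the resulting map in the same format as the examples above:

	static void blk_mq_dump_qmap(const struct blk_mq_queue_map *qmap)
	{
		unsigned int queue, cpu;

		for (queue = 0; queue < qmap->nr_queues; queue++) {
			pr_info("hctx%u: default", queue);
			for_each_possible_cpu(cpu)
				if (qmap->mq_map[cpu] == qmap->queue_offset + queue)
					pr_cont(" %u", cpu);
			pr_cont("\n");
		}
	}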
Signed-off-by: Daniel Wagner
---
 block/blk-mq-cpumap.c | 177 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 158 insertions(+), 19 deletions(-)

diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 8244ecf878358c0b8de84458dcd5100c2f360213..1e66882e4d5bd9f78d132f3a229a1577853f7a9f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -17,12 +17,25 @@
 #include "blk.h"
 #include "blk-mq.h"
 
+static struct cpumask blk_hk_online_mask;
+
 static unsigned int blk_mq_num_queues(const struct cpumask *mask,
 				      unsigned int max_queues)
 {
 	unsigned int num;
 
-	num = cpumask_weight(mask);
+	if (housekeeping_enabled(HK_TYPE_IO_QUEUE)) {
+		const struct cpumask *hk_mask;
+		struct cpumask avail_mask;
+
+		hk_mask = housekeeping_cpumask(HK_TYPE_IO_QUEUE);
+		cpumask_and(&avail_mask, mask, hk_mask);
+
+		num = cpumask_weight(&avail_mask);
+	} else {
+		num = cpumask_weight(mask);
+	}
+
 	return min_not_zero(num, max_queues);
 }
 
@@ -31,9 +44,13 @@ static unsigned int blk_mq_num_queues(const struct cpumask *mask,
  *
  * Returns an affinity mask that represents the queue-to-CPU mapping
  * requested by the block layer based on possible CPUs.
+ * This helper takes isolcpus settings into account.
  */
 const struct cpumask *blk_mq_possible_queue_affinity(void)
 {
+	if (housekeeping_enabled(HK_TYPE_IO_QUEUE))
+		return housekeeping_cpumask(HK_TYPE_IO_QUEUE);
+
 	return cpu_possible_mask;
 }
 EXPORT_SYMBOL_GPL(blk_mq_possible_queue_affinity);
@@ -46,6 +63,12 @@ EXPORT_SYMBOL_GPL(blk_mq_possible_queue_affinity);
  */
 const struct cpumask *blk_mq_online_queue_affinity(void)
 {
+	if (housekeeping_enabled(HK_TYPE_IO_QUEUE)) {
+		cpumask_and(&blk_hk_online_mask, cpu_online_mask,
+			    housekeeping_cpumask(HK_TYPE_IO_QUEUE));
+		return &blk_hk_online_mask;
+	}
+
 	return cpu_online_mask;
 }
 EXPORT_SYMBOL_GPL(blk_mq_online_queue_affinity);
@@ -57,7 +80,8 @@ EXPORT_SYMBOL_GPL(blk_mq_online_queue_affinity);
  * ignored.
  *
  * Calculates the number of queues to be used for a multiqueue
- * device based on the number of possible CPUs.
+ * device based on the number of possible CPUs. This helper
+ * takes isolcpus settings into account.
  */
 unsigned int blk_mq_num_possible_queues(unsigned int max_queues)
 {
@@ -72,7 +96,8 @@ EXPORT_SYMBOL_GPL(blk_mq_num_possible_queues);
  * ignored.
  *
  * Calculates the number of queues to be used for a multiqueue
- * device based on the number of online CPUs.
+ * device based on the number of online CPUs. This helper
+ * takes isolcpus settings into account.
  */
 unsigned int blk_mq_num_online_queues(unsigned int max_queues)
 {
@@ -80,23 +105,104 @@ unsigned int blk_mq_num_online_queues(unsigned int max_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_num_online_queues);
 
+static bool blk_mq_validate(struct blk_mq_queue_map *qmap,
+			    const struct cpumask *active_hctx)
+{
+	/*
+	 * Verify if the mapping is usable when housekeeping
+	 * configuration is enabled
+	 */
+
+	for (int queue = 0; queue < qmap->nr_queues; queue++) {
+		int cpu;
+
+		if (cpumask_test_cpu(queue, active_hctx)) {
+			/*
+			 * This hctx has at least one online CPU thus it
+			 * is able to serve any assigned isolated CPU.
+			 */
+			continue;
+		}
+
+		/*
+		 * There is no housekeeping online CPU for this hctx, all
+		 * good as long as all non housekeeping CPUs are also
+		 * offline.
+		 */
+		for_each_online_cpu(cpu) {
+			if (qmap->mq_map[cpu] != queue)
+				continue;
+
+			pr_warn("Unable to create a usable CPU-to-queue mapping with the given constraints\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void blk_mq_map_fallback(struct blk_mq_queue_map *qmap)
+{
+	unsigned int cpu;
+
+	/*
+	 * Map all CPUs to the first hctx to ensure at least one online
+	 * CPU is serving it.
+	 */
+	for_each_possible_cpu(cpu)
+		qmap->mq_map[cpu] = 0;
+}
+
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-	const struct cpumask *masks;
+	struct cpumask *masks __free(kfree) = NULL;
+	const struct cpumask *constraint;
 	unsigned int queue, cpu, nr_masks;
+	cpumask_var_t active_hctx;
 
-	masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
-	if (!masks) {
-		for_each_possible_cpu(cpu)
-			qmap->mq_map[cpu] = qmap->queue_offset;
-		return;
-	}
+	if (!zalloc_cpumask_var(&active_hctx, GFP_KERNEL))
+		goto fallback;
+
+	if (housekeeping_enabled(HK_TYPE_IO_QUEUE))
+		constraint = housekeeping_cpumask(HK_TYPE_IO_QUEUE);
+	else
+		constraint = cpu_possible_mask;
+
+	/* Map CPUs to the hardware contexts (hctx) */
+	masks = group_mask_cpus_evenly(qmap->nr_queues, constraint, &nr_masks);
+	if (!masks)
+		goto free_fallback;
 
 	for (queue = 0; queue < qmap->nr_queues; queue++) {
-		for_each_cpu(cpu, &masks[queue % nr_masks])
-			qmap->mq_map[cpu] = qmap->queue_offset + queue;
+		unsigned int idx = (qmap->queue_offset + queue) % nr_masks;
+
+		for_each_cpu(cpu, &masks[idx]) {
+			qmap->mq_map[cpu] = idx;
+
+			if (cpu_online(cpu))
+				cpumask_set_cpu(qmap->mq_map[cpu], active_hctx);
+		}
 	}
-	kfree(masks);
+
+	/* Map any unassigned CPU evenly to the hardware contexts (hctx) */
+	queue = cpumask_first(active_hctx);
+	for_each_cpu_andnot(cpu, cpu_possible_mask, constraint) {
+		qmap->mq_map[cpu] = (qmap->queue_offset + queue) % nr_masks;
+		queue = cpumask_next_wrap(queue, active_hctx);
+	}
+
+	if (!blk_mq_validate(qmap, active_hctx))
+		goto free_fallback;
+
+	free_cpumask_var(active_hctx);
+
+	return;
+
+free_fallback:
+	free_cpumask_var(active_hctx);
+
+fallback:
+	blk_mq_map_fallback(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
@@ -133,24 +239,57 @@ void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
 			  struct device *dev, unsigned int offset)
 {
-	const struct cpumask *mask;
+	cpumask_var_t active_hctx, mask;
 	unsigned int queue, cpu;
 
 	if (!dev->bus->irq_get_affinity)
 		goto fallback;
 
+	if (!zalloc_cpumask_var(&active_hctx, GFP_KERNEL))
+		goto fallback;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+		free_cpumask_var(active_hctx);
+		goto fallback;
+	}
+
+	/* Map CPUs to the hardware contexts (hctx) */
 	for (queue = 0; queue < qmap->nr_queues; queue++) {
-		mask = dev->bus->irq_get_affinity(dev, queue + offset);
-		if (!mask)
-			goto fallback;
+		const struct cpumask *affinity_mask;
+
+		affinity_mask = dev->bus->irq_get_affinity(dev, offset + queue);
+		if (!affinity_mask)
+			goto free_fallback;
 
-		for_each_cpu(cpu, mask)
+		for_each_cpu(cpu, affinity_mask) {
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
+
+			cpumask_set_cpu(cpu, mask);
+			if (cpu_online(cpu))
+				cpumask_set_cpu(qmap->mq_map[cpu], active_hctx);
+		}
+	}
+
+	/* Map any unassigned CPU evenly to the hardware contexts (hctx) */
+	queue = cpumask_first(active_hctx);
+	for_each_cpu_andnot(cpu, cpu_possible_mask, mask) {
+		qmap->mq_map[cpu] = qmap->queue_offset + queue;
+		queue = cpumask_next_wrap(queue, active_hctx);
 	}
 
+	if (!blk_mq_validate(qmap, active_hctx))
+		goto free_fallback;
+
+	free_cpumask_var(active_hctx);
+	free_cpumask_var(mask);
 	return;
 
+free_fallback:
+	free_cpumask_var(active_hctx);
+	free_cpumask_var(mask);
+
 fallback:
-	blk_mq_map_queues(qmap);
+	blk_mq_map_fallback(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
-- 
2.51.0

When isolcpus=io_queue is enabled and the last housekeeping CPU for a given
hctx goes offline, no CPU would be left to handle I/O. To prevent I/O
stalls, disallow offlining housekeeping CPUs that are still serving
isolated CPUs.

Reviewed-by: Aaron Tomlin
Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 block/blk-mq.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ba3a4b77f5786e5372adce53e4fff5aa2ace24aa..d48be77919e671a81077f7042103699a80959664 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3683,6 +3683,43 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
 	return data.has_rq;
 }
 
+static bool blk_mq_hctx_can_offline_hk_cpu(struct blk_mq_hw_ctx *hctx,
+					   unsigned int this_cpu)
+{
+	const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_IO_QUEUE);
+
+	for (int i = 0; i < hctx->nr_ctx; i++) {
+		struct blk_mq_ctx *ctx = hctx->ctxs[i];
+
+		if (ctx->cpu == this_cpu)
+			continue;
+
+		/*
+		 * Check if this context has at least one online
+		 * housekeeping CPU; in this case the hardware context is
+		 * usable.
+		 */
+		if (cpumask_test_cpu(ctx->cpu, hk_mask) &&
+		    cpu_online(ctx->cpu))
+			break;
+
+		/*
+		 * The context doesn't have any online housekeeping CPUs,
+		 * but there might be an online isolated CPU mapped to
+		 * it.
+		 */
+		if (cpu_is_offline(ctx->cpu))
+			continue;
+
+		pr_warn("%s: trying to offline hctx%d but there is still an online isolcpu CPU %d mapped to it\n",
+			hctx->queue->disk->disk_name,
+			hctx->queue_num, ctx->cpu);
+		return false;
+	}
+
+	return true;
+}
+
 static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
 		unsigned int this_cpu)
 {
@@ -3714,6 +3751,11 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
+	if (housekeeping_enabled(HK_TYPE_IO_QUEUE)) {
+		if (!blk_mq_hctx_can_offline_hk_cpu(hctx, cpu))
+			return -EINVAL;
+	}
+
 	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
 		return 0;
-- 
2.51.0

The io_queue flag informs multiqueue device drivers where to place hardware
queues. Document this new flag in the isolcpus command-line argument
description.

Reviewed-by: Aaron Tomlin
Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 Documentation/admin-guide/kernel-parameters.txt | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 747a55abf4946bb9efe320f0f62fdcd1560b0a71..4161d4277a7086f2a3726617826c50888eefb260 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2653,7 +2653,6 @@
 			  "number of CPUs in system - 1".
 
 			managed_irq
-
 			  Isolate from being targeted by managed interrupts
 			  which have an interrupt mask containing isolated
 			  CPUs. The affinity of managed interrupts is
@@ -2676,6 +2675,27 @@
 			  housekeeping CPUs has no influence on those queues.
 
+			io_queue
+			  Isolate from I/O queue work caused by multiqueue
+			  device drivers. Restrict the placement of
+			  queues to housekeeping CPUs only, ensuring that
+			  all I/O work is processed by a housekeeping CPU.
+
+			  The io_queue configuration takes precedence
+			  over managed_irq. When io_queue is used,
+			  managed_irq placement constraints have no
+			  effect.
+
+			  Note: Offlining housekeeping CPUs which serve
+			  isolated CPUs will be rejected. Isolated CPUs
+			  need to be offlined before offlining the
+			  housekeeping CPUs.
+
+			  Note: When an isolated CPU issues an I/O request,
+			  it is forwarded to a housekeeping CPU. This will
+			  trigger a software interrupt on the completion
+			  path.
+
 			The format of <cpu-list> is described above.
 
 	iucv=		[HW,NET]
-- 
2.51.0
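
A usage example (values are illustrative, not mandated by the series):
booting with

	isolcpus=io_queue,2-3,6-7,12-13

restricts hardware queue placement to the remaining housekeeping CPUs, which
is the configuration used for the example mappings earlier in this series.
io_queue can be combined with the other isolcpus flags, e.g.
isolcpus=io_queue,managed_irq,2-3, in which case io_queue takes precedence
for queue placement as documented above.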