Add the `QueueMap` and `QueueType` types as Rust abstractions for CPU to hardware queue mappings. The `QueueMap` type wraps `struct blk_mq_queue_map` and provides methods to set up the mapping between CPUs and hardware queues. `QueueType` represents the different queue types: default, read, and poll queues. Signed-off-by: Andreas Hindborg --- rust/kernel/block/mq.rs | 1 + rust/kernel/block/mq/operations.rs | 10 ++-- rust/kernel/block/mq/tag_set.rs | 96 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 5 deletions(-) diff --git a/rust/kernel/block/mq.rs b/rust/kernel/block/mq.rs index 057a5f366be3a..cd0bfbcbf317a 100644 --- a/rust/kernel/block/mq.rs +++ b/rust/kernel/block/mq.rs @@ -136,4 +136,5 @@ pub use request::Request; pub use request::RequestTimerHandle; pub use request_queue::RequestQueue; +pub use tag_set::QueueType; pub use tag_set::TagSet; diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs index 017fad010d174..28dd4b28d203f 100644 --- a/rust/kernel/block/mq/operations.rs +++ b/rust/kernel/block/mq/operations.rs @@ -102,8 +102,8 @@ fn report_zones( Err(ENOTSUPP) } - /// Called by the kernel to map submission queues to CPU cores. - fn map_queues(_tag_set: &TagSet) { + /// Called by the kernel to map hardware queues to CPU cores. + fn map_queues(_tag_set: Pin<&mut TagSet>) { build_error!(crate::error::VTABLE_DEFAULT_ERROR) } } @@ -408,9 +408,9 @@ impl OperationsVTable { /// must be a pointer to a valid and initialized `TagSet`. The pointee /// must be valid for use as a reference at least the duration of this call. unsafe extern "C" fn map_queues_callback(tag_set: *mut bindings::blk_mq_tag_set) { - // SAFETY: The safety requirements of this function satiesfies the - // requirements of `TagSet::from_ptr`. - let tag_set = unsafe { TagSet::from_ptr(tag_set) }; + // SAFETY: By C API contract `tag_set` is the tag set registered with the `GenDisk` created + // by `GenDiskBuilder`. 
+ let tag_set = unsafe { TagSet::from_ptr_mut(tag_set) }; T::map_queues(tag_set); } diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs index 330ff28c91507..e6edc5bc39312 100644 --- a/rust/kernel/block/mq/tag_set.rs +++ b/rust/kernel/block/mq/tag_set.rs @@ -97,11 +97,46 @@ pub(crate) fn raw_tag_set(&self) -> *mut bindings::blk_mq_tag_set { /// `ptr` must be a pointer to a valid and initialized `TagSet`. There /// may be no other mutable references to the tag set. The pointee must be /// live and valid at least for the duration of the returned lifetime `'a`. + #[expect(dead_code)] pub(crate) unsafe fn from_ptr<'a>(ptr: *mut bindings::blk_mq_tag_set) -> &'a Self { // SAFETY: By the safety requirements of this function, `ptr` is valid // for use as a reference for the duration of `'a`. unsafe { &*(ptr.cast::<Self>()) } } + + /// Create a `TagSet` from a raw pointer. + /// + /// # Safety + /// + /// `ptr` must be a pointer to a valid and initialized `TagSet`. There + /// may be no other references, mutable or shared, to the tag set. The pointee must be + /// live and valid at least for the duration of the returned lifetime `'a`. + pub(crate) unsafe fn from_ptr_mut<'a>(ptr: *mut bindings::blk_mq_tag_set) -> Pin<&'a mut Self> { + // SAFETY: By function safety requirements, `ptr` is valid for use as a mutable reference. + let mref = unsafe { &mut *(ptr.cast::<Self>()) }; + + // SAFETY: We never move out of `mref`. + unsafe { Pin::new_unchecked(mref) } + } + + /// Helper function to invoke a closure for each hardware queue type supported. + /// + /// This function invokes `cb` for each variant of [`QueueType`] that this [`TagSet`] supports. + /// This is helpful for setting up CPU to hardware queue maps in the [`Operations::map_queues`] + /// callback. + pub fn update_maps(self: Pin<&mut Self>, mut cb: impl FnMut(QueueMap)) -> Result { + // SAFETY: By type invariant, `self.inner` is valid. 
+ let nr_maps = unsafe { (*self.inner.get()).nr_maps }; + for i in 0..nr_maps { + cb(QueueMap { + // SAFETY: By type invariant, `self.inner` is valid. + map: unsafe { &raw mut (*self.inner.get()).map[i as usize] }, + kind: i.try_into()?, + }); + } + + Ok(()) + } } #[pinned_drop] @@ -125,3 +160,64 @@ unsafe impl Sync for TagSet {} // SAFETY: It is safe to share references to `TagSet` across thread boundaries. unsafe impl Send for TagSet {} + +/// A [`TagSet`] CPU to hardware queue mapping. +/// +/// # Invariants +/// +/// - `self.map` points to a valid `blk_mq_queue_map` +pub struct QueueMap { + map: *mut bindings::blk_mq_queue_map, + kind: QueueType, +} + +impl QueueMap { + /// Set the number of queues for this mapping kind. + pub fn set_queue_count(&mut self, nr_queues: u32) { + // SAFETY: By type invariant, `self.map` is valid. + unsafe { (*self.map).nr_queues = nr_queues } + } + + /// First hardware queue to map this queue kind onto. Used by the PCIe NVMe driver to map each + /// hardware queue type ([`QueueType`]) onto a distinct set of hardware queues. + pub fn set_offset(&mut self, offset: u32) { + // SAFETY: By type invariant, `self.map` is valid. + unsafe { (*self.map).queue_offset = offset } + } + + /// Effectuate the mapping described by [`Self`]. + pub fn map_queues(&self) { + // SAFETY: By type invariant, `self.map` is valid. + unsafe { bindings::blk_mq_map_queues(self.map) } + } + + /// Return the kind of this queue mapping. + pub fn kind(&self) -> QueueType { + self.kind + } +} + +/// Type of hardware queue. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[repr(u32)] +pub enum QueueType { + /// All I/O not otherwise accounted for. + Default = bindings::hctx_type_HCTX_TYPE_DEFAULT, + /// Just for READ I/O. + Read = bindings::hctx_type_HCTX_TYPE_READ, + /// Polled I/O of any kind. 
+ Poll = bindings::hctx_type_HCTX_TYPE_POLL, +} + +impl TryFrom<u32> for QueueType { + type Error = kernel::error::Error; + + fn try_from(value: u32) -> core::result::Result<Self, Self::Error> { + match value { + bindings::hctx_type_HCTX_TYPE_DEFAULT => Ok(QueueType::Default), + bindings::hctx_type_HCTX_TYPE_READ => Ok(QueueType::Read), + bindings::hctx_type_HCTX_TYPE_POLL => Ok(QueueType::Poll), + _ => Err(kernel::error::code::EINVAL), + } + } +} -- 2.51.2