Add Forced Unit Access (FUA) support to rnull. When enabled via the
`fua` configfs attribute, the driver advertises FUA capability and
handles FUA requests by bypassing the volatile cache in the write path.
FUA support requires memory backing and write cache to be enabled.

Signed-off-by: Andreas Hindborg
---
 drivers/block/rnull/configfs.rs          |  5 ++++
 drivers/block/rnull/disk_storage.rs      | 22 +++++++++++++---
 drivers/block/rnull/disk_storage/page.rs |  1 +
 drivers/block/rnull/rnull.rs             | 45 ++++++++++++++++++++++++--------
 4 files changed, 58 insertions(+), 15 deletions(-)

diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
index e134e21a6b564..816c057f130fc 100644
--- a/drivers/block/rnull/configfs.rs
+++ b/drivers/block/rnull/configfs.rs
@@ -118,6 +118,7 @@ fn make_group(
             zone_max_active: 25,
             zone_append_max_sectors: 26,
             poll_queues: 27,
+            fua: 28,
         ],
     };
 
@@ -158,6 +159,7 @@ fn make_group(
                 zone_max_active: 0,
                 zone_append_max_sectors: u32::MAX,
                 poll_queues: 0,
+                fua: true,
             }),
         }),
         core::iter::empty(),
@@ -234,6 +236,7 @@ struct DeviceConfigInner {
     zone_max_active: u32,
     zone_append_max_sectors: u32,
     poll_queues: u32,
+    fua: bool,
 }
 
 #[vtable]
@@ -285,6 +288,7 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
                 zone_max_active: guard.zone_max_active,
                 zone_append_max_sectors: guard.zone_append_max_sectors,
                 poll_queues: guard.poll_queues,
+                forced_unit_access: guard.fua,
             })?);
             guard.powered = true;
         } else if guard.powered && !power_op {
@@ -527,3 +531,4 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
         }
     }
 );
+configfs_simple_bool_field!(DeviceConfig, 28, fua);
diff --git a/drivers/block/rnull/disk_storage.rs b/drivers/block/rnull/disk_storage.rs
index a613ed5223ba7..b2b5eaa783cdc 100644
--- a/drivers/block/rnull/disk_storage.rs
+++ b/drivers/block/rnull/disk_storage.rs
@@ -92,6 +92,10 @@ pub(crate) fn flush(&self, hw_data: &Pin<&SpinLock>) -> Result {
         let mut access = self.access(&mut tree_guard, &mut hw_data_guard, None);
         access.flush()
     }
+
+    pub(crate) fn cache_enabled(&self) -> bool {
+        self.cache_size > 0
+    }
 }
 
 pub(crate) struct DiskStorageAccess<'a, 'b, 'c> {
@@ -190,7 +194,7 @@ fn flush(&mut self) -> Result {
         Ok(())
     }
 
-    fn get_cache_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
+    fn get_or_alloc_cache_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
         let index = Self::to_index(sector);
 
         if self.cache_guard.contains_index(index) {
@@ -215,6 +219,12 @@ fn get_cache_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
         }
     }
 
+    pub(crate) fn get_cache_page(&mut self, sector: u64) -> Option<&mut NullBlockPage> {
+        let index = Self::to_index(sector);
+
+        self.cache_guard.get_mut(index)
+    }
+
     fn get_disk_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
         let index = Self::to_index(sector);
 
@@ -232,9 +242,13 @@ fn get_disk_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
         Ok(page)
     }
 
-    pub(crate) fn get_write_page(&mut self, sector: u64) -> Result<&mut NullBlockPage> {
-        let page = if self.disk_storage.cache_size > 0 {
-            self.get_cache_page(sector)?
+    pub(crate) fn get_write_page(
+        &mut self,
+        sector: u64,
+        bypass_cache: bool,
+    ) -> Result<&mut NullBlockPage> {
+        let page = if self.disk_storage.cache_size > 0 && !bypass_cache {
+            self.get_or_alloc_cache_page(sector)?
         } else {
             self.get_disk_page(sector)?
         };
diff --git a/drivers/block/rnull/disk_storage/page.rs b/drivers/block/rnull/disk_storage/page.rs
index a34fe0762724d..728073d5dd23d 100644
--- a/drivers/block/rnull/disk_storage/page.rs
+++ b/drivers/block/rnull/disk_storage/page.rs
@@ -14,6 +14,7 @@
     uapi::PAGE_SECTORS, //
 };
 
+// TODO: Use rust bitmap
 const _CHEKC_STATUS_WIDTH: () = build_assert!((PAGE_SIZE >> SECTOR_SHIFT) <= 64);
 
 pub(crate) struct NullBlockPage {
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
index 4870aa3b7a53e..3b7edfe7efe44 100644
--- a/drivers/block/rnull/rnull.rs
+++ b/drivers/block/rnull/rnull.rs
@@ -193,6 +193,10 @@
            default: 0,
            description: "Number of IOPOLL submission queues.",
        },
+       fua: u8 {
+           default: 1,
+           description: "Enable/disable FUA support when cache_size is used. Default: 1 (true)",
+       },
    },
 }
 
@@ -250,6 +254,7 @@ fn init(_module: &'static ThisModule) -> impl PinInit {
                 zone_max_active: *module_parameters::zone_max_active.value(),
                 zone_append_max_sectors: *module_parameters::zone_append_max_sectors.value(),
                 poll_queues: *module_parameters::poll_queues.value(),
+                forced_unit_access: *module_parameters::fua.value() != 0,
             })?;
             disks.push(disk, GFP_KERNEL)?;
         }
@@ -298,6 +303,7 @@ struct NullBlkOptions<'a> {
     #[cfg_attr(not(CONFIG_BLK_DEV_ZONED), expect(unused_variables))]
     zone_append_max_sectors: u32,
     poll_queues: u32,
+    forced_unit_access: bool,
 }
 
 static SHARED_TAG_SET: SetOnce>> = SetOnce::new();
@@ -356,13 +362,11 @@ fn new(options: NullBlkOptions<'_>) -> Result>> {
             zone_max_active,
             zone_append_max_sectors,
             poll_queues,
+            forced_unit_access,
         } = options;
 
         let mut flags = mq::tag_set::Flags::default();
 
-        // TODO: lim.features |= BLK_FEAT_WRITE_CACHE;
-        // if (dev->fua)
-        //     lim.features |= BLK_FEAT_FUA;
         if blocking || memory_backed {
             flags |= mq::tag_set::Flag::Blocking;
         }
@@ -404,9 +408,10 @@ fn new(options: NullBlkOptions<'_>) -> Result>> {
 
         let device_capacity_sectors = mib_to_sectors(device_capacity_mib);
 
+        let s = storage.clone();
         let queue_data = Arc::try_pin_init(
             try_pin_init!(Self {
-                storage,
+                storage: s,
                 irq_mode,
                 completion_time,
                 memory_backed,
@@ -439,7 +444,9 @@ fn new(options: NullBlkOptions<'_>) -> Result>> {
             .capacity_sectors(device_capacity_sectors)
             .logical_block_size(block_size_bytes)?
             .physical_block_size(block_size_bytes)?
-            .rotational(rotational);
+            .rotational(rotational)
+            .write_cache(storage.cache_enabled())
+            .forced_unit_access(forced_unit_access && storage.cache_enabled());
 
         #[cfg(CONFIG_BLK_DEV_ZONED)]
         {
@@ -496,6 +503,7 @@ fn write<'a, 'b, 'c>(
         hw_data_guard: &'b mut SpinLockGuard<'c, HwQueueContext>,
         mut sector: u64,
         mut segment: Segment<'_>,
+        bypass_cache: bool,
     ) -> Result {
         let mut sheaf: Option> = None;
 
@@ -524,12 +532,21 @@ fn write<'a, 'b, 'c>(
 
         let mut access = self.storage.access(tree_guard, hw_data_guard, sheaf);
 
-        let page = access.get_write_page(sector)?;
+        if bypass_cache {
+            if let Some(page) = access.get_cache_page(sector) {
+                page.set_free(sector);
+            }
+        }
+
+        let page = access.get_write_page(sector, bypass_cache)?;
         page.set_occupied(sector);
 
         let page_offset = (sector & u64::from(block::SECTOR_MASK)) << block::SECTOR_SHIFT;
-        sector += segment.copy_to_page(page.page_mut().get_pin_mut(), page_offset as usize) as u64
+        sector += segment.copy_to_page_limit(
+            page.page_mut().get_pin_mut(),
+            page_offset as usize,
+            self.block_size_bytes.try_into()?,
+        ) as u64
             >> block::SECTOR_SHIFT;
 
         sheaf = access.sheaf;
@@ -588,6 +605,8 @@ fn transfer(
         let mut hw_data_guard = hw_data.lock();
         let mut tree_guard = self.storage.lock();
 
+        let skip_cache = rq.flags().contains(mq::RequestFlag::ForcedUnitAccess);
+
         for bio in rq.bio_iter_mut() {
             let segment_iter = bio.segment_iter();
             for segment in segment_iter {
@@ -596,9 +615,13 @@ fn transfer(
                     .len()
                     .min((end_sector - sector) as u32 >> SECTOR_SHIFT);
                 match command {
-                    mq::Command::Write => {
-                        self.write(&mut tree_guard, &mut hw_data_guard, sector, segment)?
-                    }
+                    mq::Command::Write => self.write(
+                        &mut tree_guard,
+                        &mut hw_data_guard,
+                        sector,
+                        segment,
+                        skip_cache,
+                    )?,
                     mq::Command::Read => {
                         self.read(&mut tree_guard, &mut hw_data_guard, sector, segment)?
                     }
-- 
2.51.2
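
A note for readers less familiar with FUA semantics. The sketch below is not
part of the patch; `Store`, `Page`, and the map-based storage are hypothetical,
simplified stand-ins for the rnull cache and backing tree. It only illustrates
the behaviour the write path above implements: a FUA write invalidates any
cached copy of the sector and goes straight to durable storage, so completion
of the request implies durability without waiting for a later flush.

// Illustrative sketch only, not part of the patch. `Store` and `Page` are
// hypothetical stand-ins for the rnull volatile cache and backing storage.
use std::collections::HashMap;

#[derive(Clone, PartialEq, Debug)]
struct Page(Vec<u8>);

#[derive(Default)]
struct Store {
    cache: HashMap<u64, Page>, // volatile write-back cache, emptied by flush()
    disk: HashMap<u64, Page>,  // durable backing store
}

impl Store {
    // Non-FUA writes land in the volatile cache; FUA writes drop any cached
    // copy (cf. page.set_free(sector) above) and go straight to durable
    // storage (cf. the bypass_cache path in get_write_page()).
    fn write(&mut self, sector: u64, data: Page, fua: bool) {
        if fua {
            self.cache.remove(&sector);
            self.disk.insert(sector, data);
        } else {
            self.cache.insert(sector, data);
        }
    }

    // A flush makes every cached write durable.
    fn flush(&mut self) {
        for (sector, page) in self.cache.drain() {
            self.disk.insert(sector, page);
        }
    }
}

fn main() {
    let mut store = Store::default();
    store.write(0, Page(vec![1]), false); // cached until the next flush
    store.write(8, Page(vec![2]), true);  // durable as soon as it completes
    assert_eq!(store.disk.get(&8), Some(&Page(vec![2])));
    assert!(store.disk.get(&0).is_none());
    store.flush();
    assert_eq!(store.disk.get(&0), Some(&Page(vec![1])));
}

Dropping the cached copy before writing through matters: if a stale copy stayed
in the cache, a later flush could overwrite the newer durable data with older
cached data, which is why the write path frees the cache page for the sector
before taking the cache-bypass path.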