Add support for the badblocks_once feature, which automatically clears
bad blocks after they are encountered during I/O operations. This
matches the functionality in the C null_blk driver.

When badblocks_once is enabled:

 - Bad blocks are checked during I/O requests as usual
 - If a bad block is encountered, the I/O is marked as failed
 - The bad block range is immediately cleared from the bad blocks table
 - Subsequent I/O to the same sectors will succeed

This feature is useful for testing scenarios where bad blocks are
transient or where devices can recover from bad sectors after a single
access attempt.

The feature is configurable via the configfs badblocks_once attribute
and is disabled by default, maintaining compatibility with existing
behavior.

Signed-off-by: Andreas Hindborg
---
 drivers/block/rnull/configfs.rs | 31 +++++++++++++++++++++++++++++++
 drivers/block/rnull/rnull.rs    | 21 +++++++++++++++------
 2 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
index 61a76addf468b..a39691b39e374 100644
--- a/drivers/block/rnull/configfs.rs
+++ b/drivers/block/rnull/configfs.rs
@@ -103,6 +103,7 @@ fn make_group(
             discard: 10,
             no_sched: 11,
             badblocks: 12,
+            badblocks_once: 13,
         ],
     };
 
@@ -126,6 +127,7 @@ fn make_group(
                     discard: false,
                     no_sched: false,
                     bad_blocks: Arc::pin_init(BadBlocks::new(false), GFP_KERNEL)?,
+                    bad_blocks_once: false,
                 }),
             }),
             core::iter::empty(),
@@ -186,6 +188,7 @@ struct DeviceConfigInner {
     discard: bool,
     no_sched: bool,
     bad_blocks: Arc<BadBlocks>,
+    bad_blocks_once: bool,
 }
 
 #[vtable]
@@ -222,6 +225,7 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
                 discard: guard.discard,
                 no_sched: guard.no_sched,
                 bad_blocks: guard.bad_blocks.clone(),
+                bad_blocks_once: guard.bad_blocks_once,
             })?);
             guard.powered = true;
         } else if guard.powered && !power_op {
@@ -422,3 +426,30 @@ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
         Ok(())
     }
 }
+
+#[vtable]
+impl configfs::AttributeOperations<13> for DeviceConfig {
+    type Data = DeviceConfig;
+
+    fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+        let mut writer = kernel::str::Formatter::new(page);
+
+        if this.data.lock().bad_blocks_once {
+            writer.write_str("1\n")?;
+        } else {
+            writer.write_str("0\n")?;
+        }
+
+        Ok(writer.bytes_written())
+    }
+
+    fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+        if this.data.lock().powered {
+            return Err(EBUSY);
+        }
+
+        this.data.lock().bad_blocks_once = kstrtobool_bytes(page)?;
+
+        Ok(())
+    }
+}
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
index 861392c5b5841..0f569c5b65f77 100644
--- a/drivers/block/rnull/rnull.rs
+++ b/drivers/block/rnull/rnull.rs
@@ -161,6 +161,7 @@ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
                 discard: *module_parameters::discard.value() != 0,
                 no_sched: *module_parameters::no_sched.value() != 0,
                 bad_blocks: Arc::pin_init(BadBlocks::new(false), GFP_KERNEL)?,
+                bad_blocks_once: false,
             })?;
             disks.push(disk, GFP_KERNEL)?;
         }
@@ -188,6 +189,7 @@ struct NullBlkOptions<'a> {
     discard: bool,
     no_sched: bool,
     bad_blocks: Arc<BadBlocks>,
+    bad_blocks_once: bool,
 }
 
 struct NullBlkDevice;
@@ -206,6 +208,7 @@ fn new(options: NullBlkOptions<'_>) -> Result<GenDisk<Self>> {
             discard,
             no_sched,
             bad_blocks,
+            bad_blocks_once,
         } = options;
 
         let mut flags = mq::tag_set::Flags::default();
@@ -235,6 +238,7 @@ fn new(options: NullBlkOptions<'_>) -> Result<GenDisk<Self>> {
                 memory_backed,
                 block_size: block_size.into(),
                 bad_blocks,
+                bad_blocks_once,
             }),
             GFP_KERNEL,
         )?;
@@ -393,6 +397,7 @@ struct QueueData {
     memory_backed: bool,
     block_size: u64,
     bad_blocks: Arc<BadBlocks>,
+    bad_blocks_once: bool,
 }
 
 #[pin_data]
@@ -442,12 +447,16 @@ fn queue_rq(
         if queue_data.bad_blocks.enabled() {
             let start = rq.sector();
             let end = start + u64::from(rq.sectors());
-            if !matches!(
-                queue_data.bad_blocks.check(start..end),
-                badblocks::BlockStatus::None
-            ) {
-                rq.data_ref().error.store(1, ordering::Relaxed);
-            }
+            match queue_data.bad_blocks.check(start..end) {
+                badblocks::BlockStatus::None => {}
+                badblocks::BlockStatus::Acknowledged(range)
+                | badblocks::BlockStatus::Unacknowledged(range) => {
+                    rq.data_ref().error.store(1, ordering::Relaxed);
+                    if queue_data.bad_blocks_once {
+                        queue_data.bad_blocks.set_good(range)?;
+                    }
+                }
+            };
         }
 
         // TODO: Skip IO if bad block.
-- 
2.51.2
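
The check-fail-clear flow described in the commit message can be modelled
outside the kernel for quick experimentation. The sketch below is a
standalone userspace approximation of the badblocks_once semantics only;
BadBlockTable and submit_io are made-up names and do not correspond to the
kernel BadBlocks or configfs APIs touched by this patch.

// Standalone model of the badblocks_once semantics (illustrative only;
// BadBlockTable and submit_io are made-up names, not the kernel API).

use std::ops::Range;

/// A toy bad-block table: a list of bad sector ranges plus the
/// badblocks_once flag.
struct BadBlockTable {
    ranges: Vec<Range<u64>>,
    once: bool,
}

impl BadBlockTable {
    fn new(once: bool) -> Self {
        Self { ranges: Vec::new(), once }
    }

    fn set_bad(&mut self, range: Range<u64>) {
        self.ranges.push(range);
    }

    /// Returns the first bad range overlapping the I/O, if any.
    fn check(&self, io: &Range<u64>) -> Option<Range<u64>> {
        self.ranges
            .iter()
            .find(|r| r.start < io.end && io.start < r.end)
            .cloned()
    }

    /// Clears a previously recorded bad range ("set_good").
    fn set_good(&mut self, range: &Range<u64>) {
        self.ranges.retain(|r| r != range);
    }
}

/// Models the queue_rq() logic: fail the I/O on a bad block and, when
/// once is enabled, immediately clear the offending range.
fn submit_io(table: &mut BadBlockTable, io: Range<u64>) -> Result<(), &'static str> {
    if let Some(bad) = table.check(&io) {
        if table.once {
            table.set_good(&bad);
        }
        return Err("I/O error: bad block");
    }
    Ok(())
}

fn main() {
    let mut table = BadBlockTable::new(/* badblocks_once = */ true);
    table.set_bad(8..16);

    // First access hits the bad range: the I/O fails and the range is cleared.
    assert!(submit_io(&mut table, 0..32).is_err());
    // A retry of the same sectors now succeeds.
    assert!(submit_io(&mut table, 0..32).is_ok());
}

With once set to false, both submissions fail, which matches the existing
default behavior of the driver.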