In commit 63d092d1c1b1 ("block: use chunk_sectors when evaluating stacked
atomic write limits"), the chunk_sectors limit check was missed for
blk_stack_atomic_writes_boundary_head(), so update that function to do the
proper check.

Fixes: 63d092d1c1b1 ("block: use chunk_sectors when evaluating stacked atomic write limits")
Signed-off-by: John Garry
---
 block/blk-settings.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 693bc8d20acf3..6760dbf130b24 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -643,18 +643,24 @@ static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
 static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
 				struct queue_limits *b)
 {
+	unsigned int boundary_sectors;
+
+	if (!b->atomic_write_hw_boundary || !t->chunk_sectors)
+		return true;
+
+	boundary_sectors = b->atomic_write_hw_boundary >> SECTOR_SHIFT;
+
 	/*
 	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
-	 * devices store chunk sectors in t->io_min.
+	 * devices store any stripe size in t->chunk_sectors.
 	 */
-	if (b->atomic_write_hw_boundary > t->io_min &&
-	    b->atomic_write_hw_boundary % t->io_min)
+	if (boundary_sectors > t->chunk_sectors &&
+	    boundary_sectors % t->chunk_sectors)
 		return false;
-	if (t->io_min > b->atomic_write_hw_boundary &&
-	    t->io_min % b->atomic_write_hw_boundary)
+	if (t->chunk_sectors > boundary_sectors &&
+	    t->chunk_sectors % boundary_sectors)
 		return false;
 
-	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
 	return true;
 }
 
@@ -695,13 +701,13 @@ static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
 static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 				struct queue_limits *b)
 {
-	if (b->atomic_write_hw_boundary &&
-	    !blk_stack_atomic_writes_boundary_head(t, b))
+	if (!blk_stack_atomic_writes_boundary_head(t, b))
 		return false;
 
 	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
 	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
 	t->atomic_write_hw_max = b->atomic_write_hw_max;
+	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
 	return true;
 }
--
2.43.5
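As a standalone illustration of the rule that the updated
blk_stack_atomic_writes_boundary_head() enforces (not part of the patch;
the helper name and sample values below are made up), the boundary is
first converted from bytes to sectors, and then the larger of boundary
and chunk size must be a multiple of the smaller:

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the block layer */

/* Mirror of the two divisibility tests in the hunk above */
static bool boundary_aligned(unsigned int chunk_sectors,
			     unsigned int boundary_bytes)
{
	unsigned int boundary_sectors = boundary_bytes >> SECTOR_SHIFT;

	if (!boundary_sectors || !chunk_sectors)
		return true;
	if (boundary_sectors > chunk_sectors &&
	    boundary_sectors % chunk_sectors)
		return false;
	if (chunk_sectors > boundary_sectors &&
	    chunk_sectors % boundary_sectors)
		return false;
	return true;
}

int main(void)
{
	/* 128-sector (64K) stripe, 32K boundary: 128 % 64 == 0, aligned */
	printf("%d\n", boundary_aligned(128, 32 * 1024));
	/* 96-sector stripe, 32K boundary: 96 % 64 != 0, rejected */
	printf("%d\n", boundary_aligned(96, 32 * 1024));
	return 0;
}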
Atomic writes support may not always be possible when stacking devices
which support atomic writes. One such case is a different atomic write
boundary between stacked devices (which is not supported).

In the case that atomic writes cannot be supported, the top device queue
HW limits are set to 0. However, in blk_stack_atomic_writes_limits(), we
detect that we are stacking the first bottom device by checking that the
top device atomic_write_hw_max value == 0. This gets confused with the
case of atomic writes not being supported, above.

Make the distinction between stacking the first bottom device and atomic
writes not being supported by initializing the stacked device
atomic_write_hw_max to UINT_MAX and checking for that value when stacking
the first bottom device.

Fixes: d7f36dc446e8 ("block: Support atomic writes limits for stacked devices")
Signed-off-by: John Garry
---
 block/blk-settings.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6760dbf130b24..8fa52914e16b0 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,6 +56,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
 	lim->max_hw_zone_append_sectors = UINT_MAX;
 	lim->max_user_discard_sectors = UINT_MAX;
+	lim->atomic_write_hw_max = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -232,6 +233,10 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
 		goto unsupported;
 
+	/* UINT_MAX indicates stacked limits in initial state */
+	if (lim->atomic_write_hw_max == UINT_MAX)
+		goto unsupported;
+
 	if (!lim->atomic_write_hw_max)
 		goto unsupported;
 
@@ -723,18 +728,14 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
 	if (!blk_atomic_write_start_sect_aligned(start, b))
 		goto unsupported;
 
-	/*
-	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
-	 * device, so check for compliance.
-	 */
-	if (t->atomic_write_hw_max) {
+	/* UINT_MAX indicates no stacking of bottom devices yet */
+	if (t->atomic_write_hw_max == UINT_MAX) {
+		if (!blk_stack_atomic_writes_head(t, b))
+			goto unsupported;
+	} else {
 		if (!blk_stack_atomic_writes_tail(t, b))
 			goto unsupported;
-		return;
 	}
-
-	if (!blk_stack_atomic_writes_head(t, b))
-		goto unsupported;
 
 	blk_stack_atomic_writes_chunk_sectors(t);
 	return;
--
2.43.5
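For clarity, the value of atomic_write_hw_max on a stacked queue now
carries three meanings, which can be sketched in a small standalone
program (the enum and helper names below are invented for illustration
and do not exist in the kernel):

#include <limits.h>
#include <stdio.h>

/* Illustration of the sentinel scheme introduced above (names made up) */
enum stack_state {
	STACK_INITIAL,		/* blk_set_stacking_limits(): nothing stacked yet */
	STACK_UNSUPPORTED,	/* atomic writes were disabled while stacking */
	STACK_HAS_DEVICE,	/* at least one bottom device already stacked */
};

static enum stack_state atomic_write_stack_state(unsigned int atomic_write_hw_max)
{
	if (atomic_write_hw_max == UINT_MAX)
		return STACK_INITIAL;	/* take the _head() path for the first device */
	if (atomic_write_hw_max == 0)
		return STACK_UNSUPPORTED;
	return STACK_HAS_DEVICE;	/* take the _tail() path and check compliance */
}

int main(void)
{
	printf("%d %d %d\n",
	       atomic_write_stack_state(UINT_MAX),	/* 0: initial */
	       atomic_write_stack_state(0),		/* 1: unsupported */
	       atomic_write_stack_state(64 * 1024));	/* 2: already stacked */
	return 0;
}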
blk_validate_atomic_write_limits() ensures that any boundary fits into and
is aligned to any chunk size. However, it should also be possible to fit
the chunk size into any boundary. That check is already made in
blk_stack_atomic_writes_boundary_head().

Relax the check in blk_validate_atomic_write_limits() by reusing (and
renaming) blk_stack_atomic_writes_boundary_head().

Signed-off-by: John Garry
---
 block/blk-settings.c | 66 +++++++++++++++++---------------------------
 1 file changed, 26 insertions(+), 40 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8fa52914e16b0..54cffaae4df49 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -224,6 +224,27 @@ static void blk_atomic_writes_update_limits(struct queue_limits *lim)
 		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
 }
 
+/*
+ * Test whether any boundary is aligned with any chunk size. Stacked
+ * devices store any stripe size in t->chunk_sectors.
+ */
+static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
+				unsigned int boundary_sectors)
+{
+	if (!chunk_sectors || !boundary_sectors)
+		return true;
+
+	if (boundary_sectors > chunk_sectors &&
+	    boundary_sectors % chunk_sectors)
+		return false;
+
+	if (chunk_sectors > boundary_sectors &&
+	    chunk_sectors % boundary_sectors)
+		return false;
+
+	return true;
+}
+
 static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 {
 	unsigned int boundary_sectors;
@@ -264,20 +285,9 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
 				 lim->atomic_write_hw_boundary))
 			goto unsupported;
-		/*
-		 * A feature of boundary support is that it disallows bios to
-		 * be merged which would result in a merged request which
-		 * crosses either a chunk sector or atomic write HW boundary,
-		 * even though chunk sectors may be just set for performance.
-		 * For simplicity, disallow atomic writes for a chunk sector
-		 * which is non-zero and smaller than atomic write HW boundary.
-		 * Furthermore, chunk sectors must be a multiple of atomic
-		 * write HW boundary. Otherwise boundary support becomes
-		 * complicated.
-		 * Devices which do not conform to these rules can be dealt
-		 * with if and when they show up.
-		 */
-		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
+
+		if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
+				lim->chunk_sectors, boundary_sectors)))
 			goto unsupported;
 
 		/*
@@ -644,31 +654,6 @@ static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
 	return true;
 }
 
-/* Check for valid boundary of first bottom device */
-static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
-				struct queue_limits *b)
-{
-	unsigned int boundary_sectors;
-
-	if (!b->atomic_write_hw_boundary || !t->chunk_sectors)
-		return true;
-
-	boundary_sectors = b->atomic_write_hw_boundary >> SECTOR_SHIFT;
-
-	/*
-	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
-	 * devices store any stripe size in t->chunk_sectors.
-	 */
-	if (boundary_sectors > t->chunk_sectors &&
-	    boundary_sectors % t->chunk_sectors)
-		return false;
-	if (t->chunk_sectors > boundary_sectors &&
-	    t->chunk_sectors % boundary_sectors)
-		return false;
-
-	return true;
-}
-
 static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
 {
 	unsigned int chunk_bytes;
@@ -706,7 +691,8 @@ static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
 static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 				struct queue_limits *b)
 {
-	if (!blk_stack_atomic_writes_boundary_head(t, b))
+	if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
+			b->atomic_write_hw_boundary >> SECTOR_SHIFT))
 		return false;
 
 	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
--
2.43.5
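To make the relaxation concrete, a tiny standalone example (values chosen
for illustration only, not taken from any real device) of a layout that
the previous lim->chunk_sectors % boundary_sectors test would have flagged
but which the shared helper now accepts, since the chunk size cleanly fits
into the boundary:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 32;	/* 16K stripe */
	unsigned int boundary_sectors = 128;	/* 64K atomic write boundary */

	/* Previous check in blk_validate_atomic_write_limits(): 32 % 128 != 0 */
	printf("old: %s\n", chunk_sectors % boundary_sectors ? "unsupported" : "ok");

	/* Relaxed rule: the smaller value only needs to divide the larger one */
	unsigned int big = boundary_sectors > chunk_sectors ? boundary_sectors : chunk_sectors;
	unsigned int small = boundary_sectors > chunk_sectors ? chunk_sectors : boundary_sectors;
	printf("new: %s\n", big % small ? "unsupported" : "ok");
	return 0;
}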