This value represents the number of requests for elevator tags, or driver
tags if there is no elevator. The max value for elevator tags is 2048, and
drivers use at most 16 bits for a tag, so unsigned int is wide enough.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 include/linux/blkdev.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cb4ba09959ee..cdc68c41fa96 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,7 +551,7 @@ struct request_queue {
 	/*
 	 * queue settings
 	 */
-	unsigned long		nr_requests;	/* Max # of requests */
+	unsigned int		nr_requests;	/* Max # of requests */
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
-- 
2.51.0


bfq and mq-deadline treat sync writes like async requests and only give
sync reads the full depth, limiting everything else by async_depth;
kyber, however, does not treat sync writes as async requests yet.

Consider the case where there are lots of dirty pages and the user calls
fsync to flush them: sched_tags can be exhausted by sync writes, and sync
reads can get stuck waiting for a tag. Hence make kyber follow what
mq-deadline and bfq do, and unify the async request check for all
elevators.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 block/bfq-iosched.c   | 2 +-
 block/blk-mq-sched.h  | 5 +++++
 block/kyber-iosched.c | 2 +-
 block/mq-deadline.c   | 2 +-
 4 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4a8d3d96bfe4..35f1a5de48f3 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -697,7 +697,7 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	unsigned int limit, act_idx;
 
 	/* Sync reads have full depth available */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		limit = data->q->nr_requests;
 	else
 		limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 02c40a72e959..5678e15bd33c 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -137,4 +137,9 @@ static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
 			depth);
 }
 
+static inline bool blk_mq_is_sync_read(blk_opf_t opf)
+{
+	return op_is_sync(opf) && !op_is_write(opf);
+}
+
 #endif
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index c1b36ffd19ce..2b3f5b8959af 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -556,7 +556,7 @@ static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(opf)) {
+	if (!blk_mq_is_sync_read(opf)) {
 		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 
 		data->shallow_depth = kqd->async_depth;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3e3719093aec..29d00221fbea 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -495,7 +495,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	struct deadline_data *dd = data->q->elevator->elevator_data;
 
 	/* Do not throttle synchronous reads. */
-	if (op_is_sync(opf) && !op_is_write(opf))
+	if (blk_mq_is_sync_read(opf))
 		return;
 
 	/*
-- 
2.51.0
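As a side note on the semantics above: only requests that are both sync and not
writes keep the full depth; everything else, including sync writes such as
fsync-driven writeback, is limited. A minimal userspace sketch of that
classification follows (illustrative only, not part of the series; the flag
encoding and helper names below are simplified stand-ins for the kernel's
blk_opf_t machinery, not the real definitions).

/* sketch.c - illustrative only; simplified stand-ins for kernel blk_opf_t helpers */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag encoding for illustration (not the kernel's values). */
#define OP_READ   0u
#define OP_WRITE  1u
#define F_SYNC    (1u << 8)

static bool op_is_write(unsigned int opf) { return (opf & 1u) == OP_WRITE; }
static bool op_is_sync(unsigned int opf)  { return (opf & 1u) == OP_READ || (opf & F_SYNC); }

/* Mirrors the new helper: only sync *reads* get the full tag depth. */
static bool is_sync_read(unsigned int opf) { return op_is_sync(opf) && !op_is_write(opf); }

int main(void)
{
	struct { const char *name; unsigned int opf; } io[] = {
		{ "buffered read",      OP_READ },
		{ "sync write (fsync)", OP_WRITE | F_SYNC },
		{ "async writeback",    OP_WRITE },
	};

	for (unsigned int i = 0; i < sizeof(io) / sizeof(io[0]); i++)
		printf("%-20s full depth: %s\n", io[i].name,
		       is_sync_read(io[i].opf) ? "yes" : "no (limited to async_depth)");
	return 0;
}

With the unified check, the sync write case prints "no" for all three
elevators, which is exactly the behavioural change this patch makes for kyber.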
There are no functional changes; this just factors the depth-limiting logic
out of __blk_mq_alloc_requests() to make the code cleaner.

Signed-off-by: Yu Kuai
---
 block/blk-mq.c | 62 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 25 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2650c97a75e..6c505ebfab65 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -497,6 +497,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 	return rq_list_pop(data->cached_rqs);
 }
 
+static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
+{
+	struct elevator_mq_ops *ops;
+
+	/* If no I/O scheduler has been configured, don't limit requests */
+	if (!data->q->elevator) {
+		blk_mq_tag_busy(data->hctx);
+		return;
+	}
+
+	/*
+	 * All requests use scheduler tags when an I/O scheduler is
+	 * enabled for the queue.
+	 */
+	data->rq_flags |= RQF_SCHED_TAGS;
+
+	/*
+	 * Flush/passthrough requests are special and go directly to the
+	 * dispatch list; they are not subject to the async_depth limit.
+	 */
+	if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
+	    blk_op_is_passthrough(data->cmd_flags))
+		return;
+
+	WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
+	data->rq_flags |= RQF_USE_SCHED;
+
+	/*
+	 * By default, sync requests have no limit, and async requests are
+	 * limited to async_depth.
+	 */
+	ops = &data->q->elevator->type->ops;
+	if (ops->limit_depth)
+		ops->limit_depth(data->cmd_flags, data);
+}
+
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
@@ -515,31 +551,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
 
-	if (q->elevator) {
-		/*
-		 * All requests use scheduler tags when an I/O scheduler is
-		 * enabled for the queue.
-		 */
-		data->rq_flags |= RQF_SCHED_TAGS;
-
-		/*
-		 * Flush/passthrough requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
-		    !blk_op_is_passthrough(data->cmd_flags)) {
-			struct elevator_mq_ops *ops = &q->elevator->type->ops;
-
-			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
-
-			data->rq_flags |= RQF_USE_SCHED;
-			if (ops->limit_depth)
-				ops->limit_depth(data->cmd_flags, data);
-		}
-	} else {
-		blk_mq_tag_busy(data->hctx);
-	}
-
+	blk_mq_limit_depth(data);
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
-- 
2.51.0
Add a new field async_depth to request_queue, together with the related
APIs. It is currently unused; the following patches will convert the
elevators to use it instead of their internal async_depth.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 block/blk-core.c       |  1 +
 block/blk-mq.c         |  6 ++++++
 block/blk-sysfs.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 block/elevator.c       |  1 +
 include/linux/blkdev.h |  1 +
 5 files changed, 51 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 14ae73eebe0d..cc5c9ced8e6f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -463,6 +463,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	fs_reclaim_release(GFP_KERNEL);
 
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
+	q->async_depth = BLKDEV_DEFAULT_RQ;
 
 	return q;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6c505ebfab65..ae6ce68f4786 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4628,6 +4628,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	spin_lock_init(&q->requeue_lock);
 
 	q->nr_requests = set->queue_depth;
+	q->async_depth = set->queue_depth;
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_map_swqueue(q);
@@ -4994,6 +4995,11 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
 		q->elevator->et = et;
 	}
 
+	/*
+	 * Preserve the relative value; both nr and async_depth are at most
+	 * 16-bit values, so there is no need to worry about overflow.
+	 */
+	q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
 	q->nr_requests = nr;
 	if (q->elevator && q->elevator->type->ops.depth_updated)
 		q->elevator->type->ops.depth_updated(q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8684c57498cc..5c2d29ac6570 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -127,6 +127,46 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 	return ret;
 }
 
+static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
+{
+	guard(mutex)(&disk->queue->elevator_lock);
+
+	return queue_var_show(disk->queue->async_depth, page);
+}
+
+static ssize_t
+queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
+{
+	struct request_queue *q = disk->queue;
+	unsigned int memflags;
+	unsigned long nr;
+	int ret;
+
+	if (!queue_is_mq(q))
+		return -EINVAL;
+
+	ret = queue_var_store(&nr, page, count);
+	if (ret < 0)
+		return ret;
+
+	if (nr == 0)
+		return -EINVAL;
+
+	memflags = blk_mq_freeze_queue(q);
+	scoped_guard(mutex, &q->elevator_lock) {
+		if (q->elevator) {
+			q->async_depth = min(q->nr_requests, nr);
+			if (q->elevator->type->ops.depth_updated)
+				q->elevator->type->ops.depth_updated(q);
+		} else {
+			ret = -EINVAL;
+		}
+	}
+	blk_mq_unfreeze_queue(q, memflags);
+
+	return ret;
+}
+
 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
 	ssize_t ret;
@@ -532,6 +572,7 @@ static struct queue_sysfs_entry _prefix##_entry = {	\
 }
 
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
@@ -754,6 +795,7 @@ static struct attribute *blk_mq_queue_attrs[] = {
 	 */
 	&elv_iosched_entry.attr,
 	&queue_requests_entry.attr,
+	&queue_async_depth_entry.attr,
#ifdef CONFIG_BLK_WBT
 	&queue_wb_lat_entry.attr,
#endif
diff --git a/block/elevator.c b/block/elevator.c
index 5b37ef44f52d..5ff21075a84a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -589,6 +589,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
 		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 		q->elevator = NULL;
 		q->nr_requests = q->tag_set->queue_depth;
+		q->async_depth = q->tag_set->queue_depth;
 	}
 
 	blk_add_trace_msg(q, "elv switch: %s", ctx->name);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cdc68c41fa96..edddf17f8304 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,6 +552,7 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned int		nr_requests;	/* Max # of requests */
+	unsigned int		async_depth;	/* Max # of async requests */
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
-- 
2.51.0
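For reference, a small userspace sketch of how the proportional scaling in
blk_mq_update_nr_requests() above behaves (not part of the patch; the numbers
are made-up examples):

/* scale.c - demonstrates q->async_depth = max(async_depth * nr / nr_requests, 1) */
#include <stdio.h>

static unsigned int scale(unsigned int async_depth, unsigned int old_nr,
			  unsigned int new_nr)
{
	unsigned int v = async_depth * new_nr / old_nr;

	return v ? v : 1;	/* matches the max(..., 1) clamp in the patch */
}

int main(void)
{
	/* async_depth at 75% of nr_requests=64, then nr_requests doubled/halved */
	printf("%u\n", scale(48, 64, 128));	/* -> 96, still 75% */
	printf("%u\n", scale(48, 64, 32));	/* -> 24, still 75% */
	/* tiny values never scale below 1 */
	printf("%u\n", scale(1, 64, 2));	/* -> 1 */
	return 0;
}

The point of the formula is that a user-tuned ratio survives a later change of
nr_requests instead of being silently reset.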
Convert kyber to the queue-wide async_depth instead of its internal one:
remove kqd->async_depth and the related helpers. Note that the kyber
async_depth debugfs attribute is now removed; the queue attribute with the
same name is used instead.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 block/kyber-iosched.c | 33 +++++----------------------------
 1 file changed, 5 insertions(+), 28 deletions(-)

diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 2b3f5b8959af..b84163d1f851 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -47,9 +47,8 @@ enum {
 	 * asynchronous requests, we reserve 25% of requests for synchronous
 	 * operations.
 	 */
-	KYBER_ASYNC_PERCENT = 75,
+	KYBER_DEFAULT_ASYNC_PERCENT = 75,
 };
-
 /*
  * Maximum device-wide depth for each scheduling domain.
  *
@@ -157,9 +156,6 @@ struct kyber_queue_data {
 	 */
 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 
-	/* Number of allowed async requests. */
-	unsigned int async_depth;
-
 	struct kyber_cpu_latency __percpu *cpu_latency;
 
 	/* Timer for stats aggregation and adjusting domain tokens. */
@@ -401,10 +397,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 
 static void kyber_depth_updated(struct request_queue *q)
 {
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
-	kqd->async_depth = q->nr_requests * KYBER_ASYNC_PERCENT / 100U;
-	blk_mq_set_min_shallow_depth(q, kqd->async_depth);
+	blk_mq_set_min_shallow_depth(q, q->async_depth);
 }
 
 static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
@@ -414,6 +407,7 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 
 	q->elevator = eq;
+	q->async_depth = q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100;
 	kyber_depth_updated(q);
 
 	return 0;
@@ -552,15 +546,8 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 
 static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	/*
-	 * We use the scheduler tags as per-hardware queue queueing tokens.
-	 * Async requests can be limited at this stage.
-	 */
-	if (!blk_mq_is_sync_read(opf)) {
-		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
-
-		data->shallow_depth = kqd->async_depth;
-	}
+	if (!blk_mq_is_sync_read(opf))
+		data->shallow_depth = data->q->async_depth;
 }
 
 static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
@@ -956,15 +943,6 @@ KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
 #undef KYBER_DEBUGFS_DOMAIN_ATTRS
 
-static int kyber_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", kqd->async_depth);
-	return 0;
-}
-
 static int kyber_cur_domain_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
@@ -990,7 +968,6 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
 	KYBER_QUEUE_DOMAIN_ATTRS(write),
 	KYBER_QUEUE_DOMAIN_ATTRS(discard),
 	KYBER_QUEUE_DOMAIN_ATTRS(other),
-	{"async_depth", 0400, kyber_async_depth_show},
 	{},
 };
 #undef KYBER_QUEUE_DOMAIN_ATTRS
-- 
2.51.0
In a downstream kernel we test mq-deadline with many fio workloads, and we
found a performance regression after commit 39823b47bbd4
("block/mq-deadline: Fix the tag reservation code") with the following test:

[global]
rw=randread
direct=1
ramp_time=1
ioengine=libaio
iodepth=1024
numjobs=24
bs=1024k
group_reporting=1
runtime=60

[job1]
filename=/dev/sda

The root cause is that mq-deadline now supports configuring async_depth.
Although the default value is nr_requests, the minimal allowed value is 1,
hence min_shallow_depth is set to 1, causing wake_batch to be 1. As a
consequence, the sbitmap_queue is woken up after every IO instead of every
8 IOs. In this test case, sda is an HDD and max_sectors is 128k, hence each
submitted 1M IO is split into 8 sequential 128k requests. However, because
there are 24 jobs and the tags are exhausted, the 8 requests are unlikely to
be dispatched sequentially, and changing wake_batch to 1 makes this much
worse: accounting the blktrace D stage, the percentage of sequential IO
drops from 8% to 0.8%.

Fix this problem by converting to request_queue->async_depth, where
min_shallow_depth is set each time async_depth is updated. Note that the
elevator attribute async_depth is now removed; the queue attribute with the
same name is used instead.

Fixes: 39823b47bbd4 ("block/mq-deadline: Fix the tag reservation code")
Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
Reviewed-by: Bart Van Assche
---
 block/mq-deadline.c | 39 +++++----------------------------------
 1 file changed, 5 insertions(+), 34 deletions(-)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 29d00221fbea..95917a88976f 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -98,7 +98,6 @@ struct deadline_data {
 	int fifo_batch;
 	int writes_starved;
 	int front_merges;
-	u32 async_depth;
 	int prio_aging_expire;
 
 	spinlock_t lock;
@@ -486,32 +485,16 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
- * function is used by __blk_mq_get_tag().
- */
 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	struct deadline_data *dd = data->q->elevator->elevator_data;
-
-	/* Do not throttle synchronous reads. */
-	if (blk_mq_is_sync_read(opf))
-		return;
-
-	/*
-	 * Throttle asynchronous requests and writes such that these requests
-	 * do not block the allocation of synchronous requests.
-	 */
-	data->shallow_depth = dd->async_depth;
+	if (!blk_mq_is_sync_read(opf))
+		data->shallow_depth = data->q->async_depth;
 }
 
-/* Called by blk_mq_update_nr_requests(). */
+/* Called by blk_mq_init_sched() and blk_mq_update_nr_requests(). */
 static void dd_depth_updated(struct request_queue *q)
 {
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	dd->async_depth = q->nr_requests;
-	blk_mq_set_min_shallow_depth(q, 1);
+	blk_mq_set_min_shallow_depth(q, q->async_depth);
 }
 
 static void dd_exit_sched(struct elevator_queue *e)
@@ -576,6 +559,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
 
 	q->elevator = eq;
+	q->async_depth = q->nr_requests;
 	dd_depth_updated(q);
 	return 0;
 }
@@ -763,7 +747,6 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -793,7 +776,6 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MA
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
@@ -807,7 +789,6 @@ static const struct elv_fs_entry deadline_attrs[] = {
 	DD_ATTR(write_expire),
 	DD_ATTR(writes_starved),
 	DD_ATTR(front_merges),
-	DD_ATTR(async_depth),
 	DD_ATTR(fifo_batch),
 	DD_ATTR(prio_aging_expire),
 	__ATTR_NULL
@@ -894,15 +875,6 @@ static int deadline_starved_show(void *data, struct seq_file *m)
 	return 0;
 }
 
-static int dd_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", dd->async_depth);
-	return 0;
-}
-
 static int dd_queued_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
@@ -1002,7 +974,6 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	DEADLINE_NEXT_RQ_ATTR(write2),
 	{"batching", 0400, deadline_batching_show},
 	{"starved", 0400, deadline_starved_show},
-	{"async_depth", 0400, dd_async_depth_show},
 	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
 	{"owned_by_driver", 0400, dd_owned_by_driver_show},
 	{"queued", 0400, dd_queued_show},
-- 
2.51.0
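For reference, a rough userspace model of why min_shallow_depth = 1 hurts (not
part of the patch). It assumes the sbitmap_queue wake batch is roughly the
minimum shallow depth divided by the number of wait queues, clamped to [1, 8];
the real calculation lives in lib/sbitmap.c and may differ in detail:

/* wake_batch.c - rough model of how min_shallow_depth influences the wake batch */
#include <stdio.h>

#define SBQ_WAIT_QUEUES	8
#define SBQ_WAKE_BATCH	8

/* Assumed approximation of the kernel's calculation, for illustration only. */
static unsigned int wake_batch(unsigned int min_shallow_depth)
{
	unsigned int batch = min_shallow_depth / SBQ_WAIT_QUEUES;

	if (batch < 1)
		batch = 1;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	return batch;
}

int main(void)
{
	/* Before the fix: dd_depth_updated() passed 1, so waiters are woken per IO */
	printf("min_shallow_depth=1   -> wake_batch=%u\n", wake_batch(1));
	/* After the fix: min_shallow_depth follows async_depth (e.g. 256) */
	printf("min_shallow_depth=256 -> wake_batch=%u\n", wake_batch(256));
	return 0;
}

Under these assumptions the first case yields a wake batch of 1 and the second
a wake batch of 8, which matches the "woken up after each IO instead of every
8 IOs" behaviour described in the commit message.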
The default limits are unchanged, and the user can now configure async_depth.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 block/bfq-iosched.c | 43 +++++++++++++++++--------------------------
 1 file changed, 17 insertions(+), 26 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 35f1a5de48f3..9d04bd0cc49b 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7112,39 +7112,29 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 
 static void bfq_depth_updated(struct request_queue *q)
 {
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	unsigned int nr_requests = q->nr_requests;
+	unsigned int async_depth = q->async_depth;
 
 	/*
-	 * In-word depths if no bfq_queue is being weight-raised:
-	 * leaving 25% of tags only for sync reads.
+	 * By default:
+	 * - sync reads are not limited
+	 * If bfqq is not being weight-raised:
+	 * - sync writes are limited to 75% (the async_depth default value)
+	 * - async IO is limited to 50%
+	 * If bfqq is being weight-raised:
+	 * - sync writes are limited to ~37%
+	 * - async IO is limited to ~18%
 	 *
-	 * In next formulas, right-shift the value
-	 * (1U<<bt->sb.shift), instead of computing directly
-	 * (1U<<(bt->sb.shift - something)), to be robust against
-	 * any possible value of bt->sb.shift, without having to
-	 * limit 'something'.
+	 * If request_queue->async_depth is updated by the user, all limits
+	 * are scaled accordingly.
 	 */
-	/* no more than 50% of tags for async I/O */
-	bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
-	/*
-	 * no more than 75% of tags for sync writes (25% extra tags
-	 * w.r.t. async I/O, to prevent async I/O from starving sync
-	 * writes)
-	 */
-	bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);
+	bfqd->async_depths[0][1] = async_depth;
+	bfqd->async_depths[0][0] = max(async_depth * 2 / 3, 1U);
+	bfqd->async_depths[1][1] = max(async_depth >> 1, 1U);
+	bfqd->async_depths[1][0] = max(async_depth >> 2, 1U);
 
 	/*
-	 * In-word depths in case some bfq_queue is being weight-
-	 * raised: leaving ~63% of tags for sync reads. This is the
-	 * highest percentage for which, in our tests, application
-	 * start-up times didn't suffer from any regression due to tag
-	 * shortage.
+	 * Due to cgroup QoS, the allowed requests for a bfqq might be 1.
 	 */
-	/* no more than ~18% of tags for async I/O */
-	bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
-	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
-
 	blk_mq_set_min_shallow_depth(q, 1);
 }
 
@@ -7365,6 +7355,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_set(QUEUE_FLAG_DISABLE_WBT_DEF, q);
 	wbt_disable_default(q->disk);
 	blk_stat_enable_accounting(q);
+	q->async_depth = (q->nr_requests * 3) >> 2;
 
 	return 0;
 
-- 
2.51.0
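For reference, a small userspace sketch (not part of the patch) checking that
the new derivation reproduces the old percentages when async_depth keeps its
default of 75% of nr_requests:

/* bfq_depths.c - checks the derived limits against the documented percentages */
#include <stdio.h>

int main(void)
{
	unsigned int nr_requests = 256;
	unsigned int async_depth = nr_requests * 3 / 4;	/* default: 75% */
	unsigned int d[2][2];

	d[0][1] = async_depth;			/* sync writes, not weight-raised */
	d[0][0] = async_depth * 2 / 3;		/* async IO,   not weight-raised */
	d[1][1] = async_depth >> 1;		/* sync writes, weight-raised */
	d[1][0] = async_depth >> 2;		/* async IO,   weight-raised */

	printf("sync writes:             %u (~%u%%)\n", d[0][1], d[0][1] * 100 / nr_requests);
	printf("async IO:                %u (~%u%%)\n", d[0][0], d[0][0] * 100 / nr_requests);
	printf("weight-raised sync wr:   %u (~%u%%)\n", d[1][1], d[1][1] * 100 / nr_requests);
	printf("weight-raised async IO:  %u (~%u%%)\n", d[1][0], d[1][0] * 100 / nr_requests);
	return 0;
}

With nr_requests = 256 this prints 192 (~75%), 128 (~50%), 96 (~37%) and
48 (~18%), the same values the old nr_requests-based formulas produced; the
max(..., 1U) clamps from the patch are omitted here for brevity.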
Explain the new attribute and its default value for the different cases.

Signed-off-by: Yu Kuai
Reviewed-by: Nilay Shroff
---
 Documentation/ABI/stable/sysfs-block | 34 ++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 0ed10aeff86b..aa1e94169666 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -609,6 +609,40 @@ Description:
 		enabled, and whether tags are shared.
 
 
+What:		/sys/block/<disk>/queue/async_depth
+Date:		August 2025
+Contact:	linux-block@vger.kernel.org
+Description:
+		[RW] Controls how many asynchronous requests may be allocated in the
+		block layer. The value is always capped at nr_requests.
+
+		When no elevator is active (none):
+		- async_depth is always equal to nr_requests.
+
+		For the bfq scheduler:
+		- By default, async_depth is set to 75% of nr_requests.
+		  Internal limits are then derived from this value:
+		  * Sync writes: limited to async_depth (≈75% of nr_requests).
+		  * Async I/O: limited to ~2/3 of async_depth (≈50% of nr_requests).
+
+		  If a bfq_queue is weight-raised:
+		  * Sync writes: limited to ~1/2 of async_depth (≈37% of nr_requests).
+		  * Async I/O: limited to ~1/4 of async_depth (≈18% of nr_requests).
+
+		- If the user writes a custom value to async_depth, BFQ will recompute
+		  these limits proportionally based on the new value.
+
+		For the kyber scheduler:
+		- By default, async_depth is set to 75% of nr_requests.
+		- If the user writes a custom value to async_depth, it overrides the
+		  default and directly controls the limit for writes and async I/O.
+
+		For the mq-deadline scheduler:
+		- By default, async_depth is set to nr_requests.
+		- If the user writes a custom value to async_depth, it overrides the
+		  default and directly controls the limit for writes and async I/O.
+
+
 What:		/sys/block/<disk>/queue/nr_zones
 Date:		November 2018
 Contact:	Damien Le Moal
-- 
2.51.0
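For completeness, a short userspace sketch of how the new attribute could be
read and updated once the series is applied (not part of the series; /dev/sda
is only an example device, and the sysfs path comes from the ABI entry above):

/* async_depth_tool.c - read and optionally set /sys/block/<disk>/queue/async_depth */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *disk = argc > 1 ? argv[1] : "sda";	/* example default */
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/async_depth", disk);

	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);	/* a kernel without this series will not have the file */
		return 1;
	}
	fclose(f);
	printf("%s: current async_depth = %s", path, buf);

	if (argc > 2) {		/* write a new value; the kernel caps it at nr_requests */
		f = fopen(path, "w");
		if (!f || fputs(argv[2], f) == EOF) {
			perror("write");
			return 1;
		}
		fclose(f);
	}
	return 0;
}

Writing is only accepted while an elevator is active; with none, the store
handler above returns -EINVAL and async_depth stays equal to nr_requests.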