block: wire-up support for passthrough plugging
Add support for plugging in the passthrough path. When plugging is enabled, requests are added to a plug instead of being dispatched to the driver. When the plug is finished, the whole batch is dispatched via ->queue_rqs, which turns out to be more efficient than the previous behavior of dispatching via ->queue_rq, one request at a time.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220511054750.20432-3-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 1c2d2fff6d
parent ee692a21e9
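For context on how this path is exercised: any submitter that brackets a batch of passthrough submissions with blk_start_plug()/blk_finish_plug() now gets batched dispatch, because blk_mq_request_bypass_insert() queues onto current->plug instead of dispatching each request. A minimal caller-side sketch follows; the batching behavior is what this patch wires up, but submit_passthrough_batch() is a hypothetical helper and the three-argument blk_execute_rq_nowait() signature is assumed from this kernel era:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper, illustrative only: submit nr already-allocated
 * passthrough requests as one plugged batch.
 */
static void submit_passthrough_batch(struct request **rqs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* sets current->plug */
	for (i = 0; i < nr; i++)
		/* with a plug set, each rq lands on plug->mq_list */
		blk_execute_rq_nowait(rqs[i], false, NULL);
	blk_finish_plug(&plug);		/* whole batch dispatched at once */
}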
@@ -2340,6 +2340,40 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
+/*
+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
+ * queues. This is important for md arrays to benefit from merging
+ * requests.
+ */
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+	if (plug->multiple_queues)
+		return BLK_MAX_REQUEST_COUNT * 2;
+	return BLK_MAX_REQUEST_COUNT;
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+	struct request *last = rq_list_peek(&plug->mq_list);
+
+	if (!plug->rq_count) {
+		trace_block_plug(rq->q);
+	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+		   (!blk_queue_nomerges(rq->q) &&
+		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+		blk_mq_flush_plug_list(plug, false);
+		trace_block_plug(rq->q);
+	}
+
+	if (!plug->multiple_queues && last && last->q != rq->q)
+		plug->multiple_queues = true;
+	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+		plug->has_elevator = true;
+	rq->rq_next = NULL;
+	rq_list_add(&plug->mq_list, rq);
+	plug->rq_count++;
+}
+
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
@@ -2353,7 +2387,12 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	struct blk_plug *plug = current->plug;
 
+	if (plug) {
+		blk_add_rq_to_plug(plug, rq);
+		return;
+	}
 	spin_lock(&hctx->lock);
 	if (at_head)
 		list_add(&rq->queuelist, &hctx->dispatch);
@@ -2676,40 +2715,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-/*
- * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
- * queues. This is important for md arrays to benefit from merging
- * requests.
- */
-static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
-{
-	if (plug->multiple_queues)
-		return BLK_MAX_REQUEST_COUNT * 2;
-	return BLK_MAX_REQUEST_COUNT;
-}
-
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
-	struct request *last = rq_list_peek(&plug->mq_list);
-
-	if (!plug->rq_count) {
-		trace_block_plug(rq->q);
-	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
-		   (!blk_queue_nomerges(rq->q) &&
-		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-		blk_mq_flush_plug_list(plug, false);
-		trace_block_plug(rq->q);
-	}
-
-	if (!plug->multiple_queues && last && last->q != rq->q)
-		plug->multiple_queues = true;
-	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
-		plug->has_elevator = true;
-	rq->rq_next = NULL;
-	rq_list_add(&plug->mq_list, rq);
-	plug->rq_count++;
-}
-
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 				     struct bio *bio, unsigned int nr_segs)
 {
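The efficiency gain comes from the driver-visible side of the unplug: blk_finish_plug() can hand the whole list to the driver's optional ->queue_rqs hook in one call, falling back to per-request ->queue_rq for drivers that do not implement it. A rough sketch of the shape such a hook takes, assuming hypothetical mydrv_* helpers (rq_list_pop() is the real block-layer list helper):

#include <linux/blk-mq.h>

static void mydrv_submit_one(struct request *rq);	/* hypothetical */
static void mydrv_ring_doorbell(void);			/* hypothetical */

/* Batch dispatch: consume every request queued on the plug, then notify
 * the hardware once, instead of once per request as with ->queue_rq.
 */
static void mydrv_queue_rqs(struct request **rqlist)
{
	struct request *rq;

	while ((rq = rq_list_pop(rqlist)))
		mydrv_submit_one(rq);
	mydrv_ring_doorbell();
}

A driver opts in by setting .queue_rqs = mydrv_queue_rqs in its struct blk_mq_ops.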