block: move a few merge helpers out of <linux/blkdev.h>
These are block-layer internal helpers, so move them to block/blk.h and block/blk-merge.c. Also update a comment to use better grammar.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit badf7f6437 (parent b81e0c2372)
diff --git a/block/blk-merge.c b/block/blk-merge.c
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -558,6 +558,23 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 	return queue_max_segments(rq->q);
 }
 
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+						  sector_t offset)
+{
+	struct request_queue *q = rq->q;
+
+	if (blk_rq_is_passthrough(rq))
+		return q->limits.max_hw_sectors;
+
+	if (!q->limits.chunk_sectors ||
+	    req_op(rq) == REQ_OP_DISCARD ||
+	    req_op(rq) == REQ_OP_SECURE_ERASE)
+		return blk_queue_get_max_sectors(q, req_op(rq));
+
+	return min(blk_max_size_offset(q, offset, 0),
+			blk_queue_get_max_sectors(q, req_op(rq)));
+}
+
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
@@ -718,6 +735,13 @@ static enum elv_merge blk_try_req_merge(struct request *req,
 	return ELEVATOR_NO_MERGE;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
+		return true;
+	return false;
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
diff --git a/block/blk.h b/block/blk.h
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,6 +96,44 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 	return __bvec_gap_to_prev(q, bprv, offset);
 }
 
+static inline bool rq_mergeable(struct request *rq)
+{
+	if (blk_rq_is_passthrough(rq))
+		return false;
+
+	if (req_op(rq) == REQ_OP_FLUSH)
+		return false;
+
+	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+		return false;
+
+	if (req_op(rq) == REQ_OP_ZONE_APPEND)
+		return false;
+
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+		return false;
+	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
+
+/*
+ * There are two different ways to handle DISCARD merges:
+ * 1) If max_discard_segments > 1, the driver treats every bio as a range and
+ *    send the bios to controller together. The ranges don't need to be
+ *    contiguous.
+ * 2) Otherwise, the request will be normal read/write requests. The ranges
+ *    need to be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 void blk_flush_integrity(void);
 bool __bio_integrity_endio(struct bio *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -745,37 +745,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return op_is_sync(rq->cmd_flags);
 }
 
-static inline bool rq_mergeable(struct request *rq)
-{
-	if (blk_rq_is_passthrough(rq))
-		return false;
-
-	if (req_op(rq) == REQ_OP_FLUSH)
-		return false;
-
-	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
-		return false;
-
-	if (req_op(rq) == REQ_OP_ZONE_APPEND)
-		return false;
-
-	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
-		return false;
-	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
-		return false;
-
-	return true;
-}
-
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-	if (bio_page(a) == bio_page(b) &&
-	    bio_offset(a) == bio_offset(b))
-		return true;
-
-	return false;
-}
-
 static inline unsigned int blk_queue_depth(struct request_queue *q)
 {
 	if (q->queue_depth)
@@ -1030,23 +999,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 	return min(q->limits.max_sectors, chunk_sectors);
 }
 
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
-						  sector_t offset)
-{
-	struct request_queue *q = rq->q;
-
-	if (blk_rq_is_passthrough(rq))
-		return q->limits.max_hw_sectors;
-
-	if (!q->limits.chunk_sectors ||
-	    req_op(rq) == REQ_OP_DISCARD ||
-	    req_op(rq) == REQ_OP_SECURE_ERASE)
-		return blk_queue_get_max_sectors(q, req_op(rq));
-
-	return min(blk_max_size_offset(q, offset, 0),
-			blk_queue_get_max_sectors(q, req_op(rq)));
-}
-
 static inline unsigned int blk_rq_count_bios(struct request *rq)
 {
 	unsigned int nr_bios = 0;
@@ -1490,22 +1442,6 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 	return offset << SECTOR_SHIFT;
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-	if (req_op(req) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(req->q) > 1)
-		return true;
-	return false;
-}
-
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
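
For readers skimming the diff, below is a minimal, self-contained userspace sketch (not kernel code and not part of this commit; all type and function names in it are made up for illustration) of the two decisions the moved helpers encode: blk_discard_mergable() only reports DISCARD requests as multi-range mergeable when the queue advertises more than one discard segment, and blk_rq_get_max_sectors() clamps ordinary reads and writes so a request does not cross a chunk_sectors boundary.

/* Standalone userspace model of the merge/size decisions above; the struct
 * fields are simplified stand-ins for the real request_queue/request state. */
#include <stdbool.h>
#include <stdio.h>

enum req_op_model { OP_READ, OP_WRITE, OP_DISCARD, OP_SECURE_ERASE, OP_FLUSH };

struct queue_model {
	unsigned int max_discard_segments;	/* queue_max_discard_segments() */
	unsigned int chunk_sectors;		/* q->limits.chunk_sectors */
	unsigned int max_sectors;		/* q->limits.max_sectors */
};

/* Mirrors blk_discard_mergable(): multi-range DISCARD merging is only
 * allowed when the driver accepts more than one discard segment. */
static bool discard_mergable(const struct queue_model *q, enum req_op_model op)
{
	return op == OP_DISCARD && q->max_discard_segments > 1;
}

/* Rough model of the blk_rq_get_max_sectors() cap: DISCARD/SECURE_ERASE and
 * queues without chunk_sectors use the per-op limit; otherwise the limit is
 * additionally clamped so the request does not cross a chunk boundary. */
static unsigned int max_sectors_at(const struct queue_model *q,
				   enum req_op_model op,
				   unsigned long long offset)
{
	unsigned int op_limit = q->max_sectors;	/* stand-in for blk_queue_get_max_sectors() */
	unsigned int to_chunk_end;

	if (!q->chunk_sectors || op == OP_DISCARD || op == OP_SECURE_ERASE)
		return op_limit;

	to_chunk_end = q->chunk_sectors - (offset % q->chunk_sectors);
	return to_chunk_end < op_limit ? to_chunk_end : op_limit;
}

int main(void)
{
	struct queue_model q = {
		.max_discard_segments = 4,
		.chunk_sectors = 256,
		.max_sectors = 1024,
	};

	printf("discard mergable: %d\n", discard_mergable(&q, OP_DISCARD));
	printf("max sectors for a write at offset 200: %u\n",
	       max_sectors_at(&q, OP_WRITE, 200));	/* 56: clamped at chunk boundary */
	return 0;
}

Built with a plain C compiler, the example prints 56 sectors for a write at offset 200 with 256-sector chunks, mirroring the min(blk_max_size_offset(), blk_queue_get_max_sectors()) clamp in the helper now living in block/blk-merge.c.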