block: remove bio_add_zone_append_page

This is only used by the nvmet zns passthrough code, which can trivially
just use bio_add_pc_page and do the sanity check for the max zone append
limit itself.

All future zoned file systems should follow the btrfs lead and let the
upper layers fill up bios without regard to hardware constraints, then
split them to the device limits in the I/O submission handler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20241030051859.280923-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2024-10-30 06:18:52 +01:00 committed by Jens Axboe
parent cafd00d0e9
commit f187b9bf1a
3 changed files with 13 additions and 43 deletions

View File

@ -1064,39 +1064,6 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio,
} }
EXPORT_SYMBOL(bio_add_pc_page); EXPORT_SYMBOL(bio_add_pc_page);
/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Try to append a page to the bio_vec map of a bio that will be submitted
 * as a zone-append request.  This can fail for several reasons, e.g. the
 * bio is already full, or the target block device is not zoned.  The
 * target block device must accept bios up to PAGE_SIZE, so adding a
 * single page to an empty bio always succeeds.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	bool same_page = false;

	/* Only zone-append bios on zoned devices may use this helper. */
	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;
	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
		return 0;

	/* Cap the addition at the hardware zone-append limit. */
	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
/** /**
* __bio_add_page - add page(s) to a bio in a new segment * __bio_add_page - add page(s) to a bio in a new segment
* @bio: destination bio * @bio: destination bio

View File

@ -537,6 +537,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
u16 status = NVME_SC_SUCCESS; u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0; unsigned int total_len = 0;
struct scatterlist *sg; struct scatterlist *sg;
u32 data_len = nvmet_rw_data_len(req);
struct bio *bio; struct bio *bio;
int sg_cnt; int sg_cnt;
@ -544,6 +545,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return; return;
if (data_len >
bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) {
req->error_loc = offsetof(struct nvme_rw_command, length);
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
if (!req->sg_cnt) { if (!req->sg_cnt) {
nvmet_req_complete(req, 0); nvmet_req_complete(req, 0);
return; return;
@ -576,20 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
bio->bi_opf |= REQ_FUA; bio->bi_opf |= REQ_FUA;
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
struct page *p = sg_page(sg); unsigned int len = sg->length;
unsigned int l = sg->length;
unsigned int o = sg->offset;
unsigned int ret;
ret = bio_add_zone_append_page(bio, p, l, o); if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
if (ret != sg->length) { sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL; status = NVME_SC_INTERNAL;
goto out_put_bio; goto out_put_bio;
} }
total_len += sg->length; total_len += len;
} }
if (total_len != nvmet_rw_data_len(req)) { if (total_len != data_len) {
status = NVME_SC_INTERNAL | NVME_STATUS_DNR; status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto out_put_bio; goto out_put_bio;
} }

View File

@ -418,8 +418,6 @@ bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
size_t len, size_t off); size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int); unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page, void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off); unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,