block: add a bdev_max_discard_sectors helper
Add a helper to query the number of sectors supported per discard bio based on the block device, and use it to stop various places from poking into the request_queue to see whether discard is supported and, if so, how much. This mirrors what is already done e.g. for write zeroes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
Acked-by: Coly Li <colyli@suse.de> [bcache]
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-24-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e3cc28ea28
commit cf0fbf894b
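For illustration, a minimal sketch of the conversion pattern applied throughout the diff below. The function example_max_discard_bytes() is a hypothetical caller invented here, not part of this commit; it only shows how a driver can query the per-bio discard limit through the new helper instead of reaching into the request_queue.

    #include <linux/blkdev.h>

    /*
     * Hypothetical caller, for illustration only: query the per-bio discard
     * limit of a block device after this commit. A return value of 0 means
     * the device does not support discard, so a separate support check
     * against the request_queue is no longer needed.
     */
    static u64 example_max_discard_bytes(struct block_device *bdev)
    {
            /* Before: bdev_get_queue(bdev)->limits.max_discard_sectors */
            return (u64)bdev_max_discard_sectors(bdev) << SECTOR_SHIFT;
    }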
@@ -1439,7 +1439,8 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
 static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
                                struct drbd_backing_dev *nbc)
 {
-        struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+        struct block_device *bdev = nbc->backing_bdev;
+        struct request_queue *q = bdev->bd_disk->queue;
 
         if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                 disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
@@ -1455,6 +1456,7 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
 
         if (disk_conf->rs_discard_granularity) {
                 int orig_value = disk_conf->rs_discard_granularity;
+                sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
                 int remainder;
 
                 if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
@@ -1463,8 +1465,8 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
                 remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
                 disk_conf->rs_discard_granularity += remainder;
 
-                if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
-                        disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+                if (disk_conf->rs_discard_granularity > discard_size)
+                        disk_conf->rs_discard_granularity = discard_size;
 
                 if (disk_conf->rs_discard_granularity != orig_value)
                         drbd_info(device, "rs_discard_granularity changed to %d\n",
@@ -1524,7 +1524,7 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
         granularity = max(q->limits.discard_granularity >> 9, 1U);
         alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-        max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+        max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
         max_discard_sectors -= max_discard_sectors % granularity;
         if (unlikely(!max_discard_sectors))
                 goto zero_out;
@@ -52,8 +52,7 @@ static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
         if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
                 return 0;
 
-        return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
-                                         REQ_OP_DISCARD);
+        return bdev_max_discard_sectors(dev->bdev);
 }
 
 static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
@@ -311,7 +311,7 @@ static void do_region(int op, int op_flags, unsigned region,
          * Reject unsupported discard and write same requests.
          */
         if (op == REQ_OP_DISCARD)
-                special_cmd_max_sectors = q->limits.max_discard_sectors;
+                special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
         else if (op == REQ_OP_WRITE_ZEROES)
                 special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
         if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
@@ -829,9 +829,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 }
 
 /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
+ * Check if the underlying struct block_device supports discard and if yes
+ * configure the UNMAP parameters.
  */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                        struct block_device *bdev)
@@ -843,7 +842,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                 return false;
 
         attrib->max_unmap_lba_count =
-                q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+                bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
         /*
          * Currently hardcoded to 1 in Linux/SCSI code..
          */
@@ -1196,9 +1196,8 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
                                         unsigned int *issued)
 {
         struct block_device *bdev = dc->bdev;
-        struct request_queue *q = bdev_get_queue(bdev);
         unsigned int max_discard_blocks =
-                        SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+                        SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
         struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
                                         &(dcc->fstrim_list) : &(dcc->wait_list);
@@ -1375,9 +1374,8 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
         struct discard_cmd *dc;
         struct discard_info di = {0};
         struct rb_node **insert_p = NULL, *insert_parent = NULL;
-        struct request_queue *q = bdev_get_queue(bdev);
         unsigned int max_discard_blocks =
-                        SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
+                        SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
         block_t end = lstart + len;
 
         dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
@@ -1254,6 +1254,11 @@ bdev_zone_write_granularity(struct block_device *bdev)
 int bdev_alignment_offset(struct block_device *bdev);
 unsigned int bdev_discard_alignment(struct block_device *bdev);
 
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+        return bdev_get_queue(bdev)->limits.max_discard_sectors;
+}
+
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
         struct request_queue *q = bdev_get_queue(bdev);
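As a usage sketch (not part of the commit), the new helper doubles as a support check and a clamp, much like the drbd conversion above. The function clamp_discard_sectors() and its signature are invented here for illustration:

    #include <linux/blkdev.h>
    #include <linux/errno.h>
    #include <linux/minmax.h>

    /*
     * Illustrative only: reject or clamp a caller-supplied discard of
     * *nr_sects sectors against the device limit exposed by the helper.
     */
    static int clamp_discard_sectors(struct block_device *bdev, sector_t *nr_sects)
    {
            unsigned int max = bdev_max_discard_sectors(bdev);

            if (!max)               /* device does not support discard */
                    return -EOPNOTSUPP;

            *nr_sects = min_t(sector_t, *nr_sects, max);
            return 0;
    }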