block: add a bdev_limits helper

Add a helper to get the queue_limits from the bdev without having to
poke into the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241029141937.249920-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2f5a65ef30
parent e4e535bff2
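For readers skimming the diff: the helper simply wraps the existing request_queue dereference, so every open-coded &bdev_get_queue(bdev)->limits (and the ->limits.field accesses built on it) collapses to one call. A minimal before/after sketch; the field access below is chosen for illustration only and is not itself part of this patch:

	/* The helper introduced in include/linux/blkdev.h by this patch: */
	static inline struct queue_limits *bdev_limits(struct block_device *bdev)
	{
		return &bdev_get_queue(bdev)->limits;
	}

	/* Illustrative call-site conversion (field name is an example): */
	unsigned int max_old = bdev_get_queue(bdev)->limits.max_discard_sectors;
	unsigned int max_new = bdev_limits(bdev)->max_discard_sectors;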
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -411,10 +411,9 @@ struct bio *bio_split_zone_append(struct bio *bio,
  */
 struct bio *bio_split_to_limits(struct bio *bio)
 {
-	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 	unsigned int nr_segs;
 
-	return __bio_split_to_limits(bio, lim, &nr_segs);
+	return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs);
 }
 EXPORT_SYMBOL(bio_split_to_limits);
 
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -661,7 +661,7 @@ EXPORT_SYMBOL(blk_stack_limits);
 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
 		sector_t offset, const char *pfx)
 {
-	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
+	if (blk_stack_limits(t, bdev_limits(bdev),
 			get_start_sect(bdev) + offset))
 		pr_notice("%s: Warning: Device %pg is misaligned\n",
 			pfx, bdev);
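For context on the hunk above: queue_limits_stack_bdev() is the entry point stacking drivers (DM, MD) use to fold an underlying device's limits into their own; only the derivation of its second argument changes in this patch. A minimal calling sketch, assuming a hypothetical member_bdev and its start sector:

	struct queue_limits lim;

	/* Start from the permissive stacking defaults, then combine in the
	 * member device's limits at its offset on the stacked device. */
	blk_set_stacking_limits(&lim);
	queue_limits_stack_bdev(&lim, member_bdev, member_start, "mydrv");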
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3360,7 +3360,7 @@ static int cache_iterate_devices(struct dm_target *ti,
 static void disable_passdown_if_not_supported(struct cache *cache)
 {
 	struct block_device *origin_bdev = cache->origin_dev->bdev;
-	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	struct queue_limits *origin_limits = bdev_limits(origin_bdev);
 	const char *reason = NULL;
 
 	if (!cache->features.discard_passdown)
@@ -3382,7 +3382,7 @@ static void disable_passdown_if_not_supported(struct cache *cache)
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
 	struct block_device *origin_bdev = cache->origin_dev->bdev;
-	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	struct queue_limits *origin_limits = bdev_limits(origin_bdev);
 
 	if (!cache->features.discard_passdown) {
 		/* No passdown is done so setting own virtual limits */
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2020,7 +2020,7 @@ static void clone_resume(struct dm_target *ti)
 static void disable_passdown_if_not_supported(struct clone *clone)
 {
 	struct block_device *dest_dev = clone->dest_dev->bdev;
-	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
+	struct queue_limits *dest_limits = bdev_limits(dest_dev);
 	const char *reason = NULL;
 
 	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
@@ -2041,7 +2041,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
 static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
 {
 	struct block_device *dest_bdev = clone->dest_dev->bdev;
-	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;
+	struct queue_limits *dest_limits = bdev_limits(dest_bdev);
 
 	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
 		/* No passdown is done so we set our own virtual limits */
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2842,7 +2842,7 @@ static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
 {
 	struct pool *pool = pt->pool;
 	struct block_device *data_bdev = pt->data_dev->bdev;
-	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+	struct queue_limits *data_limits = bdev_limits(data_bdev);
 	const char *reason = NULL;
 
 	if (!pt->adjusted_pf.discard_passdown)
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -707,11 +707,8 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
 		 * zoned mode. In this case, we don't have a valid max zone
 		 * append size.
 		 */
-		if (bdev_is_zoned(device->bdev)) {
-			blk_stack_limits(lim,
-					 &bdev_get_queue(device->bdev)->limits,
-					 0);
-		}
+		if (bdev_is_zoned(device->bdev))
+			blk_stack_limits(lim, bdev_limits(device->bdev), 0);
 	}
 
 	/*
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1159,6 +1159,11 @@ enum blk_default_limits {
  */
 #define BLK_DEF_MAX_SECTORS_CAP	2560u
 
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+	return &bdev_get_queue(bdev)->limits;
+}
+
 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
 {
 	return q->limits.seg_boundary_mask;
@@ -1293,23 +1298,23 @@ unsigned int bdev_discard_alignment(struct block_device *bdev);
 
 static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
 {
-	return bdev_get_queue(bdev)->limits.max_discard_sectors;
+	return bdev_limits(bdev)->max_discard_sectors;
 }
 
 static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
 {
-	return bdev_get_queue(bdev)->limits.discard_granularity;
+	return bdev_limits(bdev)->discard_granularity;
 }
 
 static inline unsigned int
 bdev_max_secure_erase_sectors(struct block_device *bdev)
 {
-	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
+	return bdev_limits(bdev)->max_secure_erase_sectors;
 }
 
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
-	return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors;
+	return bdev_limits(bdev)->max_write_zeroes_sectors;
 }
 
 static inline bool bdev_nonrot(struct block_device *bdev)
@@ -1345,7 +1350,7 @@ static inline bool bdev_write_cache(struct block_device *bdev)
 
 static inline bool bdev_fua(struct block_device *bdev)
 {
-	return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
+	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
 }
 
 static inline bool bdev_nowait(struct block_device *bdev)
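The bdev_* accessors above keep their signatures, so external callers are untouched; only the helper bodies changed. A hedged usage example, assuming bdev is a valid, opened block device:

	/* Query discard and FUA capabilities through the unchanged API. */
	if (bdev_max_discard_sectors(bdev))
		pr_info("discard granularity: %u bytes\n",
			bdev_discard_granularity(bdev));
	if (bdev_fua(bdev))
		pr_info("FUA writes supported\n");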