Merge branch 'for-6.11/block-limits' into for-6.11/block

Merge in last round of queue limits changes from Christoph.

* for-6.11/block-limits: (26 commits)
  block: move the bounce flag into the features field
  block: move the skip_tagset_quiesce flag to queue_limits
  block: move the pci_p2pdma flag to queue_limits
  block: move the zone_resetall flag to queue_limits
  block: move the zoned flag into the features field
  block: move the poll flag to queue_limits
  block: move the dax flag to queue_limits
  block: move the nowait flag to queue_limits
  block: move the synchronous flag to queue_limits
  block: move the stable_writes flag to queue_limits
  block: move the io_stat flag setting to queue_limits
  block: move the add_random flag to queue_limits
  block: move the nonrot flag to queue_limits
  block: move cache control settings out of queue->flags
  block: remove blk_flush_policy
  block: freeze the queue in queue_attr_store
  nbd: move setting the cache control flags to __nbd_set_size
  virtio_blk: remove virtblk_update_cache_mode
  loop: fold loop_update_rotational into loop_reconfigure_limits
  loop: also use the default block size from an underlying block device
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2024-06-19 08:02:34 -06:00
commit 69c34f07e4
61 changed files with 575 additions and 729 deletions
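
The common shape of the conversion, sketched here on a hypothetical driver (the names are illustrative, not taken from any converted driver): instead of toggling flags on the live queue after allocation, a driver now declares its features in the queue_limits passed to blk_mq_alloc_disk().

	/* before the series: flags poked into the queue once the disk exists */
	disk = blk_mq_alloc_disk(&dev->tag_set, NULL, dev);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_write_cache(disk->queue, true, false);

	/*
	 * after the series: declared up front; non-rotational is now the
	 * default, so only rotational media needs to set BLK_FEAT_ROTATIONAL
	 */
	struct queue_limits lim = {
		.features = BLK_FEAT_WRITE_CACHE,
	};

	disk = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);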


@ -46,41 +46,50 @@ worry if the underlying devices need any explicit cache flushing and how
the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags
may both be set on a single bio.
Implementation details for bio based block drivers
--------------------------------------------------------------
These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
directly below the submit_bio interface. For remapping drivers the REQ_FUA
bits need to be propagated to underlying devices, and a global flush needs
to be implemented for bios with the REQ_PREFLUSH bit set. For real device
drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits
on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without
data can be completed successfully without doing any work. Drivers for
devices with volatile caches need to implement the support for these
flags themselves without any help from the block layer.
Implementation details for request_fn based block drivers
---------------------------------------------------------
Feature settings for block drivers
----------------------------------
For devices that do not support volatile write caches there is no driver
support required, the block layer completes empty REQ_PREFLUSH requests before
entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
requests that have a payload. For devices with volatile write caches the
driver needs to tell the block layer that it supports flushing caches by
doing::
requests that have a payload.
blk_queue_write_cache(sdkp->disk->queue, true, false);
For devices with volatile write caches the driver needs to tell the block layer
that it supports flushing caches by setting the
and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn. Note that
REQ_PREFLUSH requests with a payload are automatically turned into a sequence
of an empty REQ_OP_FLUSH request followed by the actual write by the block
layer. For devices that also support the FUA bit the block layer needs
to be told to pass through the REQ_FUA bit using::
BLK_FEAT_WRITE_CACHE
blk_queue_write_cache(sdkp->disk->queue, true, true);
flag in the queue_limits features field. For devices that also support the FUA
bit the block layer needs to be told to pass on the REQ_FUA bit by also setting
the
and the driver must handle write requests that have the REQ_FUA bit set
in prep_fn/request_fn. If the FUA bit is not natively supported the block
layer turns it into an empty REQ_OP_FLUSH request after the actual write.
BLK_FEAT_FUA
flag in the features field of the queue_limits structure.
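As a minimal sketch (not part of the patch itself; tag_set and driver_data
stand in for the driver's own state), a blk-mq driver for a device with a
volatile write cache that also implements FUA declares both bits in the limits
it passes to blk_mq_alloc_disk()::

	struct queue_limits lim = {
		.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	};

	disk = blk_mq_alloc_disk(&tag_set, &lim, driver_data);

blk_validate_limits() clears BLK_FEAT_FUA again when BLK_FEAT_WRITE_CACHE is
not set, so BLK_FEAT_FUA on its own has no effect.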
Implementation details for bio based block drivers
--------------------------------------------------
For bio based drivers the REQ_PREFLUSH and REQ_FUA bits are simply passed on
to the driver if the driver sets the BLK_FEAT_WRITE_CACHE flag and the driver
needs to handle them.
*NOTE*: The REQ_FUA bit also gets passed on when the BLK_FEAT_FUA flag is
_not_ set. Any bio based driver that sets BLK_FEAT_WRITE_CACHE also needs to
handle REQ_FUA.
For remapping drivers the REQ_FUA bits need to be propagated to underlying
devices, and a global flush needs to be implemented for bios with the
REQ_PREFLUSH bit set.
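As a minimal sketch of what that means for a bio based driver that talks to
real hardware (the example_* names are placeholders, not existing kernel
interfaces)::

	static void example_submit_bio(struct bio *bio)
	{
		struct example_dev *dev = bio->bi_bdev->bd_disk->private_data;

		if (bio->bi_opf & REQ_PREFLUSH)
			example_flush_volatile_cache(dev);

		if (!bio_sectors(bio)) {
			/* empty flush, no data left to transfer */
			bio_endio(bio);
			return;
		}

		/* REQ_FUA must be honored even when BLK_FEAT_FUA is not set */
		example_issue_write(dev, bio, bio->bi_opf & REQ_FUA);
	}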
Implementation details for blk-mq drivers
-----------------------------------------
When the BLK_FEAT_WRITE_CACHE flag is set, REQ_OP_WRITE | REQ_PREFLUSH requests
with a payload are automatically turned into a sequence of a REQ_OP_FLUSH
request followed by the actual write by the block layer.
When the BLK_FEAT_FUA flag is set, the REQ_FUA bit is simply passed on for the
REQ_OP_WRITE request, else a REQ_OP_FLUSH request is sent by the block layer
after the completion of the write request for bio submissions with the REQ_FUA
bit set.
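As a minimal sketch of the corresponding ->queue_rq() handling, reduced to the
cache control aspects only (the example_* names are placeholders, not existing
kernel interfaces)::

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;
		struct example_dev *dev = hctx->queue->queuedata;

		blk_mq_start_request(rq);

		/* pre-flushes arrive as standalone empty REQ_OP_FLUSH requests */
		if (req_op(rq) == REQ_OP_FLUSH)
			return example_flush_volatile_cache(dev);

		/* REQ_FUA is only seen here if BLK_FEAT_FUA was declared */
		return example_issue_write(dev, rq, rq->cmd_flags & REQ_FUA);
	}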


@ -98,6 +98,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
{
struct queue_limits lim = {
.logical_block_size = bsize,
.features = BLK_FEAT_ROTATIONAL,
};
struct nfhd_device *dev;
int dev_id = id - NFHD_DEV_OFFSET;


@ -835,6 +835,7 @@ static int ubd_add(int n, char **error_out)
struct queue_limits lim = {
.max_segments = MAX_SG,
.seg_boundary_mask = PAGE_SIZE - 1,
.features = BLK_FEAT_WRITE_CACHE,
};
struct gendisk *disk;
int err = 0;
@ -881,8 +882,6 @@ static int ubd_add(int n, char **error_out)
goto out_cleanup_tags;
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_write_cache(disk->queue, true, false);
disk->major = UBD_MAJOR;
disk->first_minor = n << UBD_SHIFT;
disk->minors = 1 << UBD_SHIFT;


@ -263,6 +263,9 @@ static const struct proc_ops simdisk_proc_ops = {
static int __init simdisk_setup(struct simdisk *dev, int which,
struct proc_dir_entry *procdir)
{
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
char tmp[2] = { '0' + which, 0 };
int err;
@ -271,7 +274,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
dev->gd = blk_alloc_disk(NULL, NUMA_NO_NODE);
dev->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(dev->gd)) {
err = PTR_ERR(dev->gd);
goto out;


@ -782,7 +782,7 @@ void submit_bio_noacct(struct bio *bio)
if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
bio_op(bio) != REQ_OP_ZONE_APPEND))
goto end_io;
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
if (!bdev_write_cache(bdev)) {
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!bio_sectors(bio)) {
status = BLK_STS_OK;
@ -791,7 +791,7 @@ void submit_bio_noacct(struct bio *bio)
}
}
if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
if (!(q->limits.features & BLK_FEAT_POLL))
bio_clear_polled(bio);
switch (bio_op(bio)) {
@ -915,8 +915,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
return 0;
q = bdev_get_queue(bdev);
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
return 0;
blk_flush_plug(current->plug, false);


@ -100,23 +100,6 @@ blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_PREFLUSH)
policy |= REQ_FSEQ_PREFLUSH;
if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
(rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
}
static unsigned int blk_flush_cur_seq(struct request *rq)
{
return 1 << ffz(rq->flush.seq);
@ -398,19 +381,32 @@ static void blk_rq_init_flush(struct request *rq)
bool blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned long fflags = q->queue_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
bool supports_fua = q->limits.features & BLK_FEAT_FUA;
unsigned int policy = 0;
/* FLUSH/FUA request must never be merged */
WARN_ON_ONCE(rq->bio != rq->biotail);
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
/*
* Check which flushes we need to sequence for this operation.
*/
if (blk_queue_write_cache(q)) {
if (rq->cmd_flags & REQ_PREFLUSH)
policy |= REQ_FSEQ_PREFLUSH;
if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
policy |= REQ_FSEQ_POSTFLUSH;
}
/*
* @policy now records what operations need to be done. Adjust
* REQ_PREFLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_PREFLUSH;
if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
if (!supports_fua)
rq->cmd_flags &= ~REQ_FUA;
/*


@ -84,28 +84,15 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
QUEUE_FLAG_NAME(NONROT),
QUEUE_FLAG_NAME(IO_STAT),
QUEUE_FLAG_NAME(NOXMERGES),
QUEUE_FLAG_NAME(ADD_RANDOM),
QUEUE_FLAG_NAME(SYNCHRONOUS),
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(INIT_DONE),
QUEUE_FLAG_NAME(STABLE_WRITES),
QUEUE_FLAG_NAME(POLL),
QUEUE_FLAG_NAME(WC),
QUEUE_FLAG_NAME(FUA),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(NOWAIT),
QUEUE_FLAG_NAME(SQ_SCHED),
QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
};
#undef QUEUE_FLAG_NAME


@ -4109,6 +4109,12 @@ void blk_mq_release(struct request_queue *q)
blk_mq_sysfs_deinit(q);
}
static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
{
return set->nr_maps > HCTX_TYPE_POLL &&
set->map[HCTX_TYPE_POLL].nr_queues;
}
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata)
{
@ -4116,7 +4122,13 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
struct request_queue *q;
int ret;
q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node);
if (!lim)
lim = &default_lim;
lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
if (blk_mq_can_poll(set))
lim->features |= BLK_FEAT_POLL;
q = blk_alloc_queue(lim, set->numa_node);
if (IS_ERR(q))
return q;
q->queuedata = queuedata;
@ -4269,17 +4281,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
static void blk_mq_update_poll_flag(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
if (set->nr_maps > HCTX_TYPE_POLL &&
set->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
else
blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
}
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{
@ -4307,7 +4308,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
blk_mq_update_poll_flag(q);
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->flush_list);
@ -4631,13 +4631,15 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
int ret;
unsigned long i;
if (WARN_ON_ONCE(!q->mq_freeze_depth))
return -EINVAL;
if (!set)
return -EINVAL;
if (q->nr_requests == nr)
return 0;
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
ret = 0;
@ -4671,7 +4673,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
}
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
@ -4793,8 +4794,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
struct queue_limits lim;
blk_mq_realloc_hw_ctxs(set, q);
blk_mq_update_poll_flag(q);
if (q->nr_hw_queues != set->nr_hw_queues) {
int i = prev_nr_hw_queues;
@ -4806,6 +4809,13 @@ fallback:
set->nr_hw_queues = prev_nr_hw_queues;
goto fallback;
}
lim = queue_limits_start_update(q);
if (blk_mq_can_poll(set))
lim.features |= BLK_FEAT_POLL;
else
lim.features &= ~BLK_FEAT_POLL;
if (queue_limits_commit_update(q, &lim) < 0)
pr_warn("updating the poll flag failed\n");
blk_mq_map_swqueue(q);
}


@ -68,7 +68,7 @@ static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
if (!lim->zoned) {
if (!(lim->features & BLK_FEAT_ZONED)) {
if (WARN_ON_ONCE(lim->max_open_zones) ||
WARN_ON_ONCE(lim->max_active_zones) ||
WARN_ON_ONCE(lim->zone_write_granularity) ||
@ -269,6 +269,9 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->misaligned = 0;
}
if (!(lim->features & BLK_FEAT_WRITE_CACHE))
lim->features &= ~BLK_FEAT_FUA;
err = blk_validate_integrity_limits(lim);
if (err)
return err;
@ -461,6 +464,19 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
{
unsigned int top, bottom, alignment, ret = 0;
t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
/*
* BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
* stacking driver and all underlying devices. The stacking driver sets
* the flags before stacking the limits, and this will clear the flags
* if any of the underlying devices does not support it.
*/
if (!(b->features & BLK_FEAT_NOWAIT))
t->features &= ~BLK_FEAT_NOWAIT;
if (!(b->features & BLK_FEAT_POLL))
t->features &= ~BLK_FEAT_POLL;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_user_sectors = min_not_zero(t->max_user_sectors,
b->max_user_sectors);
@ -470,7 +486,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_write_zeroes_sectors);
t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
queue_limits_max_zone_append_sectors(b));
t->bounce = max(t->bounce, b->bounce);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
@ -593,8 +608,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_secure_erase_sectors);
t->zone_write_granularity = max(t->zone_write_granularity,
b->zone_write_granularity);
t->zoned = max(t->zoned, b->zoned);
if (!t->zoned) {
if (!(t->features & BLK_FEAT_ZONED)) {
t->zone_write_granularity = 0;
t->max_zone_append_sectors = 0;
}
@ -718,30 +732,6 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
}
EXPORT_SYMBOL(blk_set_queue_depth);
/**
* blk_queue_write_cache - configure queue's write cache
* @q: the request queue for the device
* @wc: write back cache on or off
* @fua: device supports FUA writes, if true
*
* Tell the block layer about the write cache of @q.
*/
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
if (wc) {
blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
blk_queue_flag_set(QUEUE_FLAG_WC, q);
} else {
blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
blk_queue_flag_clear(QUEUE_FLAG_WC, q);
}
if (fua)
blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);


@ -189,12 +189,9 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
return -EINVAL;
blk_mq_freeze_queue(q);
lim = queue_limits_start_update(q);
lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
if (err)
return err;
return ret;
@ -241,11 +238,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
blk_mq_freeze_queue(q);
lim = queue_limits_start_update(q);
lim.max_user_sectors = max_sectors_kb << 1;
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
if (err)
return err;
return ret;
@ -268,37 +263,43 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
return queue_var_show(queue_dma_alignment(q), page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \
{ \
int bit; \
bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{ \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
if (ret < 0) \
return ret; \
if (neg) \
val = !val; \
\
if (val) \
blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
else \
blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
return ret; \
static ssize_t queue_feature_store(struct request_queue *q, const char *page,
size_t count, unsigned int feature)
{
struct queue_limits lim;
unsigned long val;
ssize_t ret;
ret = queue_var_store(&val, page, count);
if (ret < 0)
return ret;
lim = queue_limits_start_update(q);
if (val)
lim.features |= feature;
else
lim.features &= ~feature;
ret = queue_limits_commit_update(q, &lim);
if (ret)
return ret;
return count;
}
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
#define QUEUE_SYSFS_FEATURE(_name, _feature) \
static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \
{ \
return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \
} \
static ssize_t queue_##_name##_store(struct request_queue *q, \
const char *page, size_t count) \
{ \
return queue_feature_store(q, page, count, _feature); \
}
QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
@ -393,13 +394,13 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
return queue_var_show(q->limits.features & BLK_FEAT_POLL, page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
size_t count)
{
if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
if (!(q->limits.features & BLK_FEAT_POLL))
return -EINVAL;
pr_info_ratelimited("writes to the poll attribute are ignored.\n");
pr_info_ratelimited("please use driver specific parameters instead.\n");
@ -428,32 +429,41 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return sprintf(page, "write back\n");
return sprintf(page, "write through\n");
if (q->limits.features & BLK_FLAGS_WRITE_CACHE_DISABLED)
return sprintf(page, "write through\n");
return sprintf(page, "write back\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
size_t count)
{
struct queue_limits lim;
bool disable;
int err;
if (!strncmp(page, "write back", 10)) {
if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
return -EINVAL;
blk_queue_flag_set(QUEUE_FLAG_WC, q);
disable = false;
} else if (!strncmp(page, "write through", 13) ||
!strncmp(page, "none", 4)) {
blk_queue_flag_clear(QUEUE_FLAG_WC, q);
!strncmp(page, "none", 4)) {
disable = true;
} else {
return -EINVAL;
}
lim = queue_limits_start_update(q);
if (disable)
lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
else
lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
err = queue_limits_commit_update(q, &lim);
if (err)
return err;
return count;
}
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
return sprintf(page, "%u\n", !!(q->limits.features & BLK_FEAT_FUA));
}
static ssize_t queue_dax_show(struct request_queue *q, char *page)
@ -522,9 +532,9 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.show = queue_logical_block_size_show,
};
QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
#ifdef CONFIG_BLK_WBT
@ -585,13 +595,11 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
* ends up either enabling or disabling wbt completely. We can't
* have IO inflight if that happens.
*/
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return count;
}
@ -622,7 +630,7 @@ static struct attribute *queue_attrs[] = {
&queue_write_zeroes_max_entry.attr,
&queue_zone_append_max_entry.attr,
&queue_zone_write_granularity_entry.attr,
&queue_nonrot_entry.attr,
&queue_rotational_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
&queue_max_open_zones_entry.attr,
@ -630,7 +638,7 @@ static struct attribute *queue_attrs[] = {
&queue_nomerges_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
&queue_random_entry.attr,
&queue_add_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_fua_entry.attr,
@ -722,9 +730,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
if (!entry->store)
return -EIO;
blk_mq_freeze_queue(q);
mutex_lock(&q->sysfs_lock);
res = entry->store(q, page, length);
mutex_unlock(&q->sysfs_lock);
blk_mq_unfreeze_queue(q);
return res;
}


@ -206,8 +206,8 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
*/
if (wb_acct & WBT_DISCARD)
limit = rwb->wb_background;
else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) &&
!wb_recent_wait(rwb))
else if (blk_queue_write_cache(rwb->rqos.disk->queue) &&
!wb_recent_wait(rwb))
limit = 0;
else
limit = rwb->wb_normal;


@ -394,7 +394,7 @@ struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
return IS_ENABLED(CONFIG_BOUNCE) &&
q->limits.bounce == BLK_BOUNCE_HIGH &&
(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
max_low_pfn >= max_pfn;
}


@ -1777,10 +1777,13 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;
int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);


@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp)
struct queue_limits lim = {
.max_hw_sectors = aoe_maxsectors,
.io_opt = SZ_2M,
.features = BLK_FEAT_ROTATIONAL,
};
ulong flags;
int late = 0;


@ -1992,9 +1992,12 @@ static const struct blk_mq_ops ataflop_mq_ops = {
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);


@ -335,6 +335,8 @@ static int brd_alloc(int i)
.max_hw_discard_sectors = UINT_MAX,
.max_discard_segments = 1,
.discard_granularity = PAGE_SIZE,
.features = BLK_FEAT_SYNCHRONOUS |
BLK_FEAT_NOWAIT,
};
list_for_each_entry(brd, &brd_devices, brd_list)
@ -366,10 +368,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;


@ -2697,6 +2697,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
* connect.
*/
.max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
BLK_FEAT_ROTATIONAL |
BLK_FEAT_STABLE_WRITES,
};
device = minor_to_device(minor);
@ -2735,9 +2738,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
goto out_no_io_page;


@ -4516,7 +4516,8 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
struct queue_limits lim = {
.max_hw_sectors = 64,
.max_hw_sectors = 64,
.features = BLK_FEAT_ROTATIONAL,
};
struct gendisk *disk;


@ -916,24 +916,6 @@ static void loop_free_idle_workers_timer(struct timer_list *timer)
return loop_free_idle_workers(lo, false);
}
static void loop_update_rotational(struct loop_device *lo)
{
struct file *file = lo->lo_backing_file;
struct inode *file_inode = file->f_mapping->host;
struct block_device *file_bdev = file_inode->i_sb->s_bdev;
struct request_queue *q = lo->lo_queue;
bool nonrot = true;
/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
if (file_bdev)
nonrot = bdev_nonrot(file_bdev);
if (nonrot)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@ -975,17 +957,40 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
bool update_discard_settings)
static unsigned short loop_default_blocksize(struct loop_device *lo,
struct block_device *backing_bdev)
{
/* In case of direct I/O, match underlying block size */
if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
return bdev_logical_block_size(backing_bdev);
return SECTOR_SIZE;
}
static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
struct queue_limits lim;
if (S_ISBLK(inode->i_mode))
backing_bdev = I_BDEV(inode);
else if (inode->i_sb->s_bdev)
backing_bdev = inode->i_sb->s_bdev;
if (!bsize)
bsize = loop_default_blocksize(lo, backing_bdev);
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = bsize;
lim.physical_block_size = bsize;
lim.io_min = bsize;
if (update_discard_settings)
loop_config_discard(lo, &lim);
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
lim.features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
lim.features |= BLK_FEAT_ROTATIONAL;
loop_config_discard(lo, &lim);
return queue_limits_commit_update(lo->lo_queue, &lim);
}
@ -999,7 +1004,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
int error;
loff_t size;
bool partscan;
unsigned short bsize;
bool is_loop;
if (!file)
@ -1075,22 +1079,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
if (config->block_size)
bsize = config->block_size;
else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
/* In case of direct I/O, match underlying block size */
bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
else
bsize = 512;
error = loop_reconfigure_limits(lo, bsize, true);
error = loop_reconfigure_limits(lo, config->block_size);
if (WARN_ON_ONCE(error))
goto out_unlock;
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
@ -1133,12 +1125,10 @@ out_putf:
static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct queue_limits lim;
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
/*
* Freeze the request queue when unbinding on a live file descriptor and
* thus an open device. When called from ->release we are guaranteed
@ -1156,7 +1146,14 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
loop_reconfigure_limits(lo, 512, false);
/* reset the block size to the default */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
lim.io_min = SECTOR_SIZE;
queue_limits_commit_update(lo->lo_queue, &lim);
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
@ -1488,7 +1485,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
err = loop_reconfigure_limits(lo, arg, false);
err = loop_reconfigure_limits(lo, arg);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);


@ -3485,8 +3485,6 @@ skip_create_disk:
goto start_service_thread;
/* Set device limits. */
blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
/* Set the capacity of the device in 512 byte sectors. */


@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_capacity(disk, size >> SECTOR_SHIFT);
set_disk_ro(disk, 1);
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;


@ -342,6 +342,14 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.max_hw_discard_sectors = UINT_MAX;
else
lim.max_hw_discard_sectors = 0;
if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
} else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
} else {
lim.features |= BLK_FEAT_WRITE_CACHE;
lim.features &= ~BLK_FEAT_FUA;
}
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
error = queue_limits_commit_update(nbd->disk->queue, &lim);
@ -1286,19 +1294,10 @@ static void nbd_bdev_reset(struct nbd_device *nbd)
static void nbd_parse_flags(struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
if (config->flags & NBD_FLAG_READ_ONLY)
if (nbd->config->flags & NBD_FLAG_READ_ONLY)
set_disk_ro(nbd->disk, true);
else
set_disk_ro(nbd->disk, false);
if (config->flags & NBD_FLAG_SEND_FLUSH) {
if (config->flags & NBD_FLAG_SEND_FUA)
blk_queue_write_cache(nbd->disk->queue, true, true);
else
blk_queue_write_cache(nbd->disk->queue, true, false);
}
else
blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
@ -1868,11 +1867,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
goto out_err_disk;
}
/*
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
/*


@ -1928,6 +1928,13 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_tags;
}
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
lim.features |= BLK_FEAT_WRITE_CACHE;
if (dev->fua)
lim.features |= BLK_FEAT_FUA;
}
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
@ -1940,13 +1947,7 @@ static int null_add_dev(struct nullb_device *dev)
nullb_setup_bwtimer(nullb);
}
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
blk_queue_write_cache(nullb->q, true, dev->fua);
}
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0)


@ -158,7 +158,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
sector += dev->zone_size_sects;
}
lim->zoned = true;
lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->chunk_sectors = dev->zone_size_sects;
lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
@ -171,7 +171,6 @@ int null_register_zoned_dev(struct nullb *nullb)
struct request_queue *q = nullb->q;
struct gendisk *disk = nullb->disk;
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
disk->nr_zones = bdev_nr_zones(disk->part0);
pr_info("%s: using %s zone append\n",


@ -2622,6 +2622,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
struct queue_limits lim = {
.max_hw_sectors = PACKET_MAX_SECTORS,
.logical_block_size = CD_FRAMESIZE,
.features = BLK_FEAT_ROTATIONAL,
};
int idx;
int ret = -ENOMEM;


@ -388,9 +388,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
.max_segments = -1,
.max_segment_size = dev->bounce_size,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
};
struct request_queue *queue;
struct gendisk *gendisk;
if (dev->blk_size < 512) {
@ -447,10 +447,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
goto fail_free_tag_set;
}
queue = gendisk->queue;
blk_queue_write_cache(queue, true, false);
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;


@ -4949,7 +4949,6 @@ static const struct blk_mq_ops rbd_mq_ops = {
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
struct queue_limits lim = {
@ -4979,12 +4978,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
lim.features |= BLK_FEAT_STABLE_WRITES;
disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
}
q = disk->queue;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
@ -4996,13 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
rbd_dev->disk = disk;
return 0;


@ -1352,10 +1352,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
/*
* Network device does not need rotational
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
err = add_disk(dev->gd);
if (err)
put_disk(dev->gd);
@ -1389,6 +1385,12 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
le32_to_cpu(rsp->max_discard_sectors);
}
if (rsp->cache_policy & RNBD_WRITEBACK) {
lim.features |= BLK_FEAT_WRITE_CACHE;
if (rsp->cache_policy & RNBD_FUA)
lim.features |= BLK_FEAT_FUA;
}
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
@ -1397,10 +1399,6 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
blk_queue_write_cache(dev->queue,
!!(rsp->cache_policy & RNBD_WRITEBACK),
!!(rsp->cache_policy & RNBD_FUA));
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}


@ -791,6 +791,7 @@ static int probe_disk(struct vdc_port *port)
.seg_boundary_mask = PAGE_SIZE - 1,
.max_segment_size = PAGE_SIZE,
.max_segments = port->ring_cookies,
.features = BLK_FEAT_ROTATIONAL,
};
struct request_queue *q;
struct gendisk *g;


@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs)
static int swim_floppy_init(struct swim_priv *swd)
{
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
int err;
int drive;
struct swim __iomem *base = swd->base;
@ -820,7 +823,7 @@ static int swim_floppy_init(struct swim_priv *swd)
goto exit_put_disks;
swd->unit[drive].disk =
blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);


@ -1189,6 +1189,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
struct floppy_state *fs;
struct gendisk *disk;
int rc;
@ -1210,7 +1213,7 @@ static int swim3_attach(struct macio_dev *mdev,
if (rc)
goto out_unregister;
disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;


@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@ -484,16 +482,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos)
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
p->attrs & UBLK_ATTR_FUA);
if (p->attrs & UBLK_ATTR_ROTATIONAL)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
@ -2204,12 +2194,21 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
return -EOPNOTSUPP;
lim.zoned = true;
lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
lim.max_zone_append_sectors = p->max_zone_append_sectors;
}
if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
lim.features |= BLK_FEAT_WRITE_CACHE;
if (ub->params.basic.attrs & UBLK_ATTR_FUA)
lim.features |= BLK_FEAT_FUA;
}
if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
lim.features |= BLK_FEAT_ROTATIONAL;
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;


@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
lim->zoned = true;
lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
@ -1089,14 +1089,6 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
return writeback;
}
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
u8 writeback = virtblk_get_cache_mode(vdev);
struct virtio_blk *vblk = vdev->priv;
blk_queue_write_cache(vblk->disk->queue, writeback, false);
}
static const char *const virtblk_cache_types[] = {
"write through", "write back"
};
@ -1108,6 +1100,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
struct queue_limits lim;
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
@ -1116,7 +1109,17 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
lim = queue_limits_start_update(disk->queue);
if (virtblk_get_cache_mode(vdev))
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
blk_mq_freeze_queue(disk->queue);
i = queue_limits_commit_update(disk->queue, &lim);
blk_mq_unfreeze_queue(disk->queue);
if (i)
return i;
return count;
}
@ -1448,7 +1451,9 @@ static int virtblk_read_limits(struct virtio_blk *vblk,
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct queue_limits lim = { };
struct queue_limits lim = {
.features = BLK_FEAT_ROTATIONAL,
};
int err, index;
unsigned int queue_depth;
@ -1512,6 +1517,9 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_free_tags;
if (virtblk_get_cache_mode(vdev))
lim.features |= BLK_FEAT_WRITE_CACHE;
vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
if (IS_ERR(vblk->disk)) {
err = PTR_ERR(vblk->disk);
@ -1527,9 +1535,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->fops = &virtblk_fops;
vblk->index = index;
/* configure queue flush support */
virtblk_update_cache_mode(vdev);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
set_disk_ro(vblk->disk, 1);
@ -1541,8 +1546,8 @@ static int virtblk_probe(struct virtio_device *vdev)
* All steps that follow use the VQs therefore they need to be
* placed after the virtio_device_ready() call above.
*/
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(lim.features & BLK_FEAT_ZONED)) {
err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;


@ -788,6 +788,11 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* A barrier request a superset of FUA, so we can
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*
* Note that we can end up here with a FUA write and the
* flags cleared. This happens when the flag was
* run-time disabled after a failing I/O, and we'll
* simply submit it as a normal write.
*/
if (info->feature_flush && info->feature_fua)
ring_req->operation =
@ -795,8 +800,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
else
ring_req->operation = 0;
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
@ -887,16 +890,6 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
notify_remote_via_irq(rinfo->irq);
}
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
!info->feature_fua));
}
static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
@ -908,12 +901,22 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
/*
* Check if the backend actually supports flushes.
*
* While the block layer won't send us flushes if we don't claim to
* support them, the Xen protocol allows the backend to revoke support
* at any time. That is of course a really bad idea and dangerous, but
* has been allowed for 10+ years. In that case we simply clear the
* flags, and directly return here for an empty flush and ignore the
* FUA flag later on.
*/
if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush))
goto complete;
if (RING_FULL(&rinfo->ring))
goto out_busy;
if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
goto out_err;
if (blkif_queue_request(qd->rq, rinfo))
goto out_busy;
@ -921,14 +924,14 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_OK;
out_err:
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_IOERR;
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_DEV_RESOURCE;
complete:
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_end_request(qd->rq, BLK_STS_OK);
return BLK_STS_OK;
}
static void blkif_complete_rq(struct request *rq)
@ -956,6 +959,12 @@ static void blkif_set_queue_limits(const struct blkfront_info *info,
lim->max_secure_erase_sectors = UINT_MAX;
}
if (info->feature_flush) {
lim->features |= BLK_FEAT_WRITE_CACHE;
if (info->feature_fua)
lim->features |= BLK_FEAT_FUA;
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
lim->logical_block_size = info->sector_size;
lim->physical_block_size = info->physical_sector_size;
@ -984,8 +993,6 @@ static const char *flush_info(struct blkfront_info *info)
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
@ -1139,7 +1146,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
err = PTR_ERR(gd);
goto out_free_tag_set;
}
blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@ -1627,7 +1633,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_OK;
info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
fallthrough;
case BLKIF_OP_READ:


@ -2208,6 +2208,8 @@ static int zram_add(void)
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
.max_write_zeroes_sectors = UINT_MAX,
#endif
.features = BLK_FEAT_STABLE_WRITES |
BLK_FEAT_SYNCHRONOUS,
};
struct zram *zram;
int ret, device_id;
@ -2245,10 +2247,6 @@ static int zram_add(void)
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;


@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr)
.max_segments = 1,
/* set a large max size to get most from DMA */
.max_segment_size = 0x40000,
.features = BLK_FEAT_ROTATIONAL,
};
int err;


@ -897,7 +897,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
sector_t sectors, struct block_device *cached_bdev,
const struct block_device_operations *ops)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
struct queue_limits lim = {
@ -909,6 +908,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
.io_min = block_size,
.logical_block_size = block_size,
.physical_block_size = block_size,
.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
};
uint64_t n;
int idx;
@ -974,13 +974,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->minors = BCACHE_MINORS;
d->disk->fops = ops;
d->disk->private_data = d;
q = d->disk->queue;
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_write_cache(q, true, true);
return 0;
out_bioset_exit:


@ -579,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return 0;
}
static void dm_set_stacking_limits(struct queue_limits *limits)
{
blk_set_stacking_limits(limits);
limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
}
/*
* Impose necessary and sufficient conditions on a devices's table such
* that any incoming bio which respects its logical_block_size can be
@ -617,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
for (i = 0; i < t->num_targets; i++) {
ti = dm_table_get_target(t, i);
blk_set_stacking_limits(&ti_limits);
dm_set_stacking_limits(&ti_limits);
/* combine all target devices' limits */
if (ti->type->iterate_devices)
@ -1018,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
static bool dm_table_supports_poll(struct dm_table *t);
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned int per_io_data_size = 0, front_pad, io_front_pad;
unsigned int min_pool_size = 0, pool_size;
struct dm_md_mempools *pools;
unsigned int bioset_flags = 0;
if (unlikely(type == DM_TYPE_NONE)) {
DMERR("no table type is set, can't allocate mempools");
@ -1042,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
goto init_bs;
}
if (md->queue->limits.features & BLK_FEAT_POLL)
bioset_flags |= BIOSET_PERCPU_CACHE;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@ -1054,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
io_front_pad = roundup(per_io_data_size,
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
goto out_free_pools;
if (t->integrity_supported &&
bioset_integrity_create(&pools->io_bs, pool_size))
@ -1398,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
@ -1453,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
static bool dm_table_supports_poll(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
return false;
}
return true;
}
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
@ -1591,7 +1577,7 @@ int dm_calculate_queue_limits(struct dm_table *t,
unsigned int zone_sectors = 0;
bool zoned = false;
blk_set_stacking_limits(limits);
dm_set_stacking_limits(limits);
t->integrity_supported = true;
for (unsigned int i = 0; i < t->num_targets; i++) {
@ -1604,7 +1590,7 @@ int dm_calculate_queue_limits(struct dm_table *t,
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
blk_set_stacking_limits(&ti_limits);
dm_set_stacking_limits(&ti_limits);
if (!ti->type->iterate_devices) {
/* Set I/O hints portion of queue limits */
@ -1619,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
if (!zoned && ti_limits.zoned) {
if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
/*
* After stacking all limits, validate all devices
* in table support this zoned model and zone sectors.
*/
zoned = ti_limits.zoned;
zoned = (ti_limits.features & BLK_FEAT_ZONED);
zone_sectors = ti_limits.chunk_sectors;
}
@ -1672,12 +1658,12 @@ combine_limits:
* zoned model on host-managed zoned block devices.
* BUT...
*/
if (limits->zoned) {
if (limits->features & BLK_FEAT_ZONED) {
/*
* ...IF the above limits stacking determined a zoned model
* validate that all of the table's devices conform to it.
*/
zoned = limits->zoned;
zoned = limits->features & BLK_FEAT_ZONED;
zone_sectors = limits->chunk_sectors;
}
if (validate_hardware_zoned(t, zoned, zone_sectors))
@ -1686,34 +1672,16 @@ combine_limits:
return validate_hardware_logical_block_alignment(t, limits);
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
/*
* Check if a target requires flush support even if none of the underlying
* devices need it (e.g. to persist target-specific metadata).
*/
static bool dm_table_supports_flush(struct dm_table *t)
{
unsigned long flush = (unsigned long) data;
struct request_queue *q = bdev_get_queue(dev->bdev);
return (q->queue_flags & flush);
}
static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
/*
* Require at least one underlying device to support flushes.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting flushes must provide.
*/
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (!ti->num_flush_bios)
continue;
if (ti->flush_supported)
return true;
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
if (ti->num_flush_bios && ti->flush_supported)
return true;
}
@ -1734,20 +1702,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
return !bdev_nonrot(dev->bdev);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
return !blk_queue_add_random(q);
}
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@ -1772,12 +1726,6 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
return !bdev_nowait(dev->bdev);
}
static bool dm_table_supports_nowait(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
@ -1785,10 +1733,6 @@ static bool dm_table_supports_nowait(struct dm_table *t)
if (!dm_target_supports_nowait(ti->type))
return false;
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
return false;
}
return true;
@ -1845,23 +1789,20 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
return bdev_stable_writes(dev->bdev);
}
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
bool wc = false, fua = false;
int r;
if (dm_table_supports_nowait(t))
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
if (!dm_table_supports_nowait(t))
limits->features &= ~BLK_FEAT_NOWAIT;
/*
* The current polling implementation does not support request based
* stacking.
*/
if (!__table_type_bio_based(t->type))
limits->features &= ~BLK_FEAT_POLL;
if (!dm_table_supports_discards(t)) {
limits->max_hw_discard_sectors = 0;
@ -1876,53 +1817,22 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
fua = true;
}
blk_queue_write_cache(q, wc, fua);
if (dm_table_supports_flush(t))
limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
if (dm_table_supports_dax(t, device_not_dax_capable)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
limits->features |= BLK_FEAT_DAX;
if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
} else
blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
limits->features &= ~BLK_FEAT_DAX;
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
/* Ensure that all underlying devices are non-rotational. */
if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
* If any underlying device requires stable pages, a table must require
* them as well. Only targets that support iterate_devices are considered:
* don't want error, zero, etc to require stable pages.
*/
if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
/*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool, Only request-based targets use this.
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
* have it set.
*/
if (blk_queue_add_random(q) &&
dm_table_any_dev_attr(t, device_is_not_random, NULL))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
/* For a zoned table, setup the zone related queue attributes. */
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(limits->features & BLK_FEAT_ZONED)) {
r = dm_set_zones_restrictions(t, q, limits);
if (r)
return r;
@ -1936,28 +1846,14 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* Now that the limits are set, check the zones mapped by the table
* and setup the resources for zone append emulation if necessary.
*/
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(limits->features & BLK_FEAT_ZONED)) {
r = dm_revalidate_zones(t, q);
if (r)
return r;
}
dm_update_crypto_profile(q, t);
/*
* Check for request-based device is left to
* dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
*
* For bio-based device, only set QUEUE_FLAG_POLL when all
* underlying devices supporting polling.
*/
if (__table_type_bio_based(t->type)) {
if (dm_table_supports_poll(t))
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
else
blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
}
return 0;
}


@ -372,7 +372,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
lim->max_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->zoned = false;
lim->features &= ~BLK_FEAT_ZONED;
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
md->nr_zones = 0;
disk->nr_zones = 0;
@ -392,7 +392,8 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
DMWARN("%s zone resource limits may be unreliable",
disk->disk_name);
if (lim->zoned && !static_key_enabled(&zoned_enabled.key))
if (lim->features & BLK_FEAT_ZONED &&
!static_key_enabled(&zoned_enabled.key))
static_branch_enable(&zoned_enabled);
return 0;
}


@ -1009,7 +1009,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_sectors = chunk_sectors;
/* We are exposing a drive-managed zoned block device */
limits->zoned = false;
limits->features &= ~BLK_FEAT_ZONED;
}
/*


@ -2386,22 +2386,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
struct table_device *td;
int r;
switch (type) {
case DM_TYPE_REQUEST_BASED:
WARN_ON_ONCE(type == DM_TYPE_NONE);
if (type == DM_TYPE_REQUEST_BASED) {
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
r = dm_calculate_queue_limits(t, &limits);


@ -5870,7 +5870,11 @@ struct mddev *md_alloc(dev_t dev, char *name)
int partitioned;
int shift;
int unit;
int error ;
int error;
struct queue_limits lim = {
.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT,
};
/*
* Wait for any previous instance of this device to be completely
@ -5910,7 +5914,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(disk)) {
error = PTR_ERR(disk);
goto out_free_mddev;
@ -5928,7 +5932,6 @@ struct mddev *md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
blk_queue_write_cache(disk->queue, true, true);
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@ -6232,28 +6235,6 @@ int md_run(struct mddev *mddev)
}
}
if (!mddev_is_dm(mddev)) {
struct request_queue *q = mddev->gendisk->queue;
bool nonrot = true;
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
nonrot = false;
break;
}
}
if (mddev->degraded)
nonrot = false;
if (nonrot)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
/* Set the NOWAIT flags if all underlying devices support it */
if (nowait)
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
@ -7212,15 +7193,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->thread)
md_update_sb(mddev, 1);
/*
* If the new disk does not support REQ_NOWAIT,
* disable on the whole MD.
*/
if (!bdev_nowait(rdev->bdev)) {
pr_info("%s: Disabling nowait because %pg does not support nowait\n",
mdname(mddev), rdev->bdev);
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
}
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.


@ -7086,12 +7086,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
err = -ENODEV;
else if (new != conf->skip_copy) {
struct request_queue *q = mddev->gendisk->queue;
struct queue_limits lim = queue_limits_start_update(q);
conf->skip_copy = new;
if (new)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
lim.features |= BLK_FEAT_STABLE_WRITES;
else
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
lim.features &= ~BLK_FEAT_STABLE_WRITES;
err = queue_limits_commit_update(q, &lim);
}
mddev_unlock_and_resume(mddev);
return err ?: len;


@ -2466,8 +2466,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
bool cache_enabled = false;
bool fua_enabled = false;
unsigned int features = 0;
devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
@ -2499,7 +2498,24 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
md->disk = mmc_init_queue(&md->queue, card);
if (mmc_host_cmd23(card->host)) {
if ((mmc_card_mmc(card) &&
card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
}
if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
} else if (mmc_cache_enabled(card->host)) {
features |= BLK_FEAT_WRITE_CACHE;
}
md->disk = mmc_init_queue(&md->queue, card, features);
if (IS_ERR(md->disk)) {
ret = PTR_ERR(md->disk);
goto err_kfree;
@ -2539,26 +2555,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
if ((mmc_card_mmc(card) &&
card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
}
if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
fua_enabled = true;
cache_enabled = true;
}
if (mmc_cache_enabled(card->host))
cache_enabled = true;
blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s%s\n",


@ -344,10 +344,12 @@ static const struct blk_mq_ops mmc_mq_ops = {
};
static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
struct mmc_card *card)
struct mmc_card *card, unsigned int features)
{
struct mmc_host *host = card->host;
struct queue_limits lim = { };
struct queue_limits lim = {
.features = features,
};
struct gendisk *disk;
if (mmc_can_erase(card))
@ -376,18 +378,16 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
lim.max_segments = host->max_segs;
}
if (mmc_host_is_spi(host) && host->use_spi_crc)
lim.features |= BLK_FEAT_STABLE_WRITES;
disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
if (IS_ERR(disk))
return disk;
mq->queue = disk->queue;
if (mmc_host_is_spi(host) && host->use_spi_crc)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
blk_queue_rq_timeout(mq->queue, 60 * HZ);
blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
@ -413,10 +413,12 @@ static inline bool mmc_merge_capable(struct mmc_host *host)
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
* @features: block layer features (BLK_FEAT_*)
*
* Initialise a MMC card request queue.
*/
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
unsigned int features)
{
struct mmc_host *host = card->host;
struct gendisk *disk;
@ -460,7 +462,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
disk = mmc_alloc_disk(mq, card);
disk = mmc_alloc_disk(mq, card, features);
if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
return disk;


@ -94,7 +94,8 @@ struct mmc_queue {
struct work_struct complete_work;
};
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card);
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
unsigned int features);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);


@ -336,6 +336,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
lim.logical_block_size = tr->blksize;
if (tr->discard)
lim.max_hw_discard_sectors = UINT_MAX;
if (tr->flush)
lim.features |= BLK_FEAT_WRITE_CACHE;
/* Create gendisk */
gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
@ -372,13 +374,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create the request queue */
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
gd->queue = new->rq;
if (new->readonly)


@ -1501,6 +1501,7 @@ static int btt_blk_init(struct btt *btt)
.logical_block_size = btt->sector_size,
.max_hw_sectors = UINT_MAX,
.max_integrity_segments = 1,
.features = BLK_FEAT_SYNCHRONOUS,
};
int rc;
@ -1518,9 +1519,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
if (rc)


@ -455,6 +455,8 @@ static int pmem_attach_disk(struct device *dev,
.logical_block_size = pmem_sector_size(ndns),
.physical_block_size = PAGE_SIZE,
.max_hw_sectors = UINT_MAX,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_SYNCHRONOUS,
};
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
@ -463,7 +465,6 @@ static int pmem_attach_disk(struct device *dev,
struct dax_device *dax_dev;
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct request_queue *q;
struct gendisk *disk;
void *addr;
int rc;
@ -495,6 +496,10 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
if (fua)
lim.features |= BLK_FEAT_FUA;
if (is_nd_pfn(dev))
lim.features |= BLK_FEAT_DAX;
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
@ -505,7 +510,6 @@ static int pmem_attach_disk(struct device *dev,
disk = blk_alloc_disk(&lim, nid);
if (IS_ERR(disk))
return PTR_ERR(disk);
q = disk->queue;
pmem->disk = disk;
pmem->pgmap.owner = pmem;
@ -543,12 +547,6 @@ static int pmem_attach_disk(struct device *dev,
}
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, fua);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
disk->fops = &pmem_fops;
disk->private_data = pmem;
nvdimm_namespace_disk_name(ndns, disk->disk_name);


@ -2056,7 +2056,6 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
struct queue_limits lim;
struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_zone_info zi = {};
@ -2106,6 +2105,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
else
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
/*
* Register a metadata profile for PI, or the plain non-integrity NVMe
* metadata masquerading as Type 0 if supported, otherwise reject block
@ -2132,7 +2136,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_queue_write_cache(ns->disk->queue, vwc, vwc);
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
@ -3721,6 +3724,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct queue_limits lim = { };
struct nvme_ns *ns;
struct gendisk *disk;
int node = ctrl->numa_node;
@ -3729,7 +3733,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (!ns)
return;
disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns);
if (ctrl->opts && ctrl->opts->data_digest)
lim.features |= BLK_FEAT_STABLE_WRITES;
if (ctrl->ops->supports_pci_p2pdma &&
ctrl->ops->supports_pci_p2pdma(ctrl))
lim.features |= BLK_FEAT_PCI_P2PDMA;
disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
@ -3737,15 +3747,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
ns->disk = disk;
ns->queue = disk->queue;
if (ctrl->opts && ctrl->opts->data_digest)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->supports_pci_p2pdma &&
ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->ctrl = ctrl;
kref_init(&ns->kref);
@ -4488,13 +4489,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return ret;
if (ctrl->ops->flags & NVME_F_FABRICS) {
ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
struct queue_limits lim = {
.features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
};
ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
}
blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
ctrl->connect_q);
}
ctrl->tagset = set;


@ -521,7 +521,6 @@ static void nvme_requeue_work(struct work_struct *work)
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
struct queue_limits lim;
bool vwc = false;
mutex_init(&head->lock);
bio_list_init(&head->requeue_list);
@ -539,6 +538,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_set_stacking_limits(&lim);
lim.dma_alignment = 3;
lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
if (head->ids.csi != NVME_CSI_ZNS)
lim.max_zone_append_sectors = 0;
@ -549,24 +549,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
head->disk->private_data = head;
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
/*
* This assumes all controllers that refer to a namespace either
* support poll queues or not. That is not a strict guarantee,
* but if the assumption is wrong the effect is only suboptimal
* performance but not correctness problem.
*/
if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
blk_queue_write_cache(head->disk->queue, vwc, vwc);
return 0;
}
@ -875,10 +857,6 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
nvme_mpath_set_live(ns);
}
if (test_bit(QUEUE_FLAG_STABLE_WRITES, &ns->queue->queue_flags) &&
ns->head->disk)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
ns->head->disk->nr_zones = ns->disk->nr_zones;


@ -108,13 +108,12 @@ free_data:
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
struct nvme_zone_info *zi)
{
lim->zoned = 1;
lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
lim->max_open_zones = zi->max_open_zones;
lim->max_active_zones = zi->max_active_zones;
lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
lim->chunk_sectors = ns->head->zsze =
nvme_lba_to_sect(ns->head, zi->zone_size);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
}
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,


@ -68,7 +68,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;


@ -548,6 +548,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
{
struct queue_limits lim = {
.logical_block_size = 4096,
.features = BLK_FEAT_DAX,
};
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
@ -643,7 +644,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors


@ -439,7 +439,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
.logical_block_size = 1 << 12,
};
unsigned int devindex;
struct request_queue *rq;
int len, ret;
lim.max_segments = min(scmdev->nr_max_block,
@ -474,10 +473,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->major = scm_major;


@ -1057,15 +1057,15 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev,
struct queue_limits *lim)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
struct iscsi_session *session = tcp_sw_host->session;
struct iscsi_conn *conn = session->leadconn;
if (conn->datadgst_en)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
sdev->request_queue);
lim->features |= BLK_FEAT_STABLE_WRITES;
return 0;
}
@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
.device_configure = iscsi_sw_tcp_device_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,


@ -631,8 +631,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
// XXX:
if (blk_queue_add_random(q))
if (q->limits.features & BLK_FEAT_ADD_RANDOM)
add_disk_randomness(req->q->disk);
WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
@ -1987,7 +1986,7 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
shost->dma_alignment, dma_get_cache_alignment() - 1);
if (shost->no_highmem)
lim->bounce = BLK_BOUNCE_HIGH;
lim->features |= BLK_FEAT_BOUNCE_HIGH;
dma_set_seg_boundary(dev, shost->dma_boundary);
dma_set_max_seg_size(dev, shost->max_segment_size);


@ -120,17 +120,18 @@ static const char *sd_cache_types[] = {
"write back, no read (daft)"
};
static void sd_set_flush_flag(struct scsi_disk *sdkp)
static void sd_set_flush_flag(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
bool wc = false, fua = false;
if (sdkp->WCE) {
wc = true;
lim->features |= BLK_FEAT_WRITE_CACHE;
if (sdkp->DPOFUA)
fua = true;
lim->features |= BLK_FEAT_FUA;
else
lim->features &= ~BLK_FEAT_FUA;
} else {
lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
}
blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}
static ssize_t
@ -168,9 +169,18 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
if (sdkp->cache_override) {
struct queue_limits lim;
sdkp->WCE = wce;
sdkp->RCD = rcd;
sd_set_flush_flag(sdkp);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_set_flush_flag(sdkp, &lim);
blk_mq_freeze_queue(sdkp->disk->queue);
ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (ret)
return ret;
return count;
}
@ -466,10 +476,6 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK)
return -EINVAL;
/* ignore the provisioning mode for ZBC devices */
if (sd_is_zoned(sdkp))
return count;
mode = sysfs_match_string(lbp_mode, buf);
if (mode < 0)
return -EINVAL;
@ -2288,7 +2294,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
out:
if (sd_is_zoned(sdkp))
if (sdkp->device->type == TYPE_ZBC)
good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
@ -3295,7 +3301,6 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
static void sd_read_block_characteristics(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
struct request_queue *q = sdkp->disk->queue;
struct scsi_vpd *vpd;
u16 rot;
@ -3311,34 +3316,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp,
sdkp->zoned = (vpd->data[8] >> 4) & 3;
rcu_read_unlock();
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
if (sdkp->device->type == TYPE_ZBC) {
lim->zoned = true;
/*
* Per ZBC and ZAC specifications, writes in sequential write
* required zones of host-managed devices must be aligned to
* the device physical block size.
*/
lim->zone_write_granularity = sdkp->physical_block_size;
} else {
/*
* Host-aware devices are treated as conventional.
*/
lim->zoned = false;
}
#endif /* CONFIG_BLK_DEV_ZONED */
if (rot == 1)
lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (!sdkp->first_scan)
return;
if (lim->zoned)
if (sdkp->device->type == TYPE_ZBC)
sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
else if (sdkp->zoned == 1)
sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
@ -3612,7 +3596,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct request_queue *q = sdkp->disk->queue;
sector_t old_capacity = sdkp->capacity;
struct queue_limits lim;
unsigned char *buffer;
@ -3659,8 +3642,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
@ -3686,7 +3668,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
* We now have all cache related info, determine how we deal
* with flush requests.
*/
sd_set_flush_flag(sdkp);
sd_set_flush_flag(sdkp, &lim);
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;


@ -222,11 +222,6 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec
void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim);
static inline int sd_is_zoned(struct scsi_disk *sdkp)
{
return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC;
}
#ifdef CONFIG_BLK_DEV_ZONED
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,


@ -232,7 +232,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
int zone_idx = 0;
int ret;
if (!sd_is_zoned(sdkp))
if (sdkp->device->type != TYPE_ZBC)
/* Not a zoned device */
return -EOPNOTSUPP;
@ -300,7 +300,7 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t sector = blk_rq_pos(rq);
if (!sd_is_zoned(sdkp))
if (sdkp->device->type != TYPE_ZBC)
/* Not a zoned device */
return BLK_STS_IOERR;
@ -521,7 +521,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
static void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
if (sdkp->device->type != TYPE_ZBC || !sdkp->capacity)
return;
if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
@ -592,19 +592,21 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
u8 buf[SD_BUF_SIZE])
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
int ret;
if (!sd_is_zoned(sdkp)) {
/*
* Device managed or normal SCSI disk, no special handling
* required.
*/
if (sdkp->device->type != TYPE_ZBC)
return 0;
}
lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
/*
* Per ZBC and ZAC specifications, writes in sequential write required
* zones of host-managed devices must be aligned to the device physical
* block size.
*/
lim->zone_write_granularity = sdkp->physical_block_size;
/* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */
sdkp->device->use_16_for_rw = 1;
@ -626,7 +628,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
sdkp->early_zone_info.zone_blocks = zone_blocks;
/* The drive satisfies the kernel restrictions: set it up */
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
if (sdkp->zones_max_open == U32_MAX)
lim->max_open_zones = 0;
else


@ -282,6 +282,67 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
/* flags set by the driver in queue_limits.features */
enum {
/* supports a volatile write cache */
BLK_FEAT_WRITE_CACHE = (1u << 0),
/* supports passing on the FUA bit */
BLK_FEAT_FUA = (1u << 1),
/* rotational device (hard drive or floppy) */
BLK_FEAT_ROTATIONAL = (1u << 2),
/* contributes to the random number pool */
BLK_FEAT_ADD_RANDOM = (1u << 3),
/* do disk/partitions IO accounting */
BLK_FEAT_IO_STAT = (1u << 4),
/* don't modify data until writeback is done */
BLK_FEAT_STABLE_WRITES = (1u << 5),
/* always completes in submit context */
BLK_FEAT_SYNCHRONOUS = (1u << 6),
/* supports REQ_NOWAIT */
BLK_FEAT_NOWAIT = (1u << 7),
/* supports DAX */
BLK_FEAT_DAX = (1u << 8),
/* supports I/O polling */
BLK_FEAT_POLL = (1u << 9),
/* is a zoned device */
BLK_FEAT_ZONED = (1u << 10),
/* supports Zone Reset All */
BLK_FEAT_ZONE_RESETALL = (1u << 11),
/* supports PCI(e) p2p requests */
BLK_FEAT_PCI_P2PDMA = (1u << 12),
/* skip this queue in blk_mq_(un)quiesce_tagset */
BLK_FEAT_SKIP_TAGSET_QUIESCE = (1u << 13),
/* bounce all highmem pages */
BLK_FEAT_BOUNCE_HIGH = (1u << 14),
};
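As a minimal sketch of how these feature bits are meant to be used (hypothetical driver; tag_set, drv_data and the chosen limits are illustrative, not code from this series), a driver now declares its capabilities up front when allocating the disk instead of setting queue flags afterwards:

	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
					  BLK_FEAT_ROTATIONAL,
	};
	struct gendisk *disk;

	/* the limits, including the feature bits, are applied at allocation time */
	disk = blk_mq_alloc_disk(&tag_set, &lim, drv_data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);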
/*
* Flags automatically inherited when stacking limits.
*/
#define BLK_FEAT_INHERIT_MASK \
(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH)
/* internal flags in queue_limits.flags */
enum {
/* do not send FLUSH or FUA command despite advertised write cache */
BLK_FLAGS_WRITE_CACHE_DISABLED = (1u << 31),
};
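For changes after the queue is live, the series converts call sites to the queue_limits update protocol rather than blk_queue_flag_set/clear. A sketch of that pattern, mirroring the raid5 skip_copy and sd cache_type hunks above (q, enable and the feature bit are illustrative):

	struct queue_limits lim = queue_limits_start_update(q);
	int err;

	if (enable)
		lim.features |= BLK_FEAT_STABLE_WRITES;
	else
		lim.features &= ~BLK_FEAT_STABLE_WRITES;
	err = queue_limits_commit_update(q, &lim);
	/* some callers, like the sd cache_type hunk, also freeze the queue around the commit */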
/*
* BLK_BOUNCE_NONE: never bounce (default)
* BLK_BOUNCE_HIGH: bounce all highmem pages
@ -292,7 +353,8 @@ enum blk_bounce {
};
struct queue_limits {
enum blk_bounce bounce;
unsigned int features;
unsigned int flags;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@ -324,7 +386,6 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
bool zoned;
unsigned int max_open_zones;
unsigned int max_active_zones;
@ -529,34 +590,17 @@ struct request_queue {
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/
#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
(1UL << QUEUE_FLAG_SAME_COMP) | \
(1UL << QUEUE_FLAG_NOWAIT))
#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
@ -568,14 +612,12 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_nonrot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
((q)->limits.features & BLK_FEAT_ZONE_RESETALL)
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
@ -591,7 +633,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@ -621,7 +663,8 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(q->limits.features & BLK_FEAT_ZONED);
}
#ifdef CONFIG_BLK_DEV_ZONED
@ -950,7 +993,6 @@ void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
@ -1289,8 +1331,7 @@ static inline bool bdev_nonrot(struct block_device *bdev)
static inline bool bdev_synchronous(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_SYNCHRONOUS,
&bdev_get_queue(bdev)->queue_flags);
return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
static inline bool bdev_stable_writes(struct block_device *bdev)
@ -1300,22 +1341,28 @@ static inline bool bdev_stable_writes(struct block_device *bdev)
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
return true;
return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags);
return q->limits.features & BLK_FEAT_STABLE_WRITES;
}
static inline bool blk_queue_write_cache(struct request_queue *q)
{
return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
!(q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED);
}
static inline bool bdev_write_cache(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
return blk_queue_write_cache(bdev_get_queue(bdev));
}
static inline bool bdev_fua(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA;
}
static inline bool bdev_nowait(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}
static inline bool bdev_is_zoned(struct block_device *bdev)