Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 20:22:09 +00:00)
block-6.3-2023-03-16
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmQT2N0QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpqGoD/4qkjAY1yh8+9Fa6pWRQZTVJVv2TNqyHi2y
wabrRpuhCzuJ7MVLOf2TXG0OJihBiwE2CYjuUOurUxQA+MPxbktnna5MAHBxLsdR
Fd+79axMBloXM++h+GwbzRLN2vIIL8TRUONuuFMXvZ0+z/e6NR2xwiaTsRQBY04w
KhrOQ4IKYkKxBVeZIWagaYhhZ+DVp7pvtt10eGLQVSSXjdkVs/DYNgguUndcxfYA
WV1Fz8XJu1VDyyHvDsT90WMYqSk0bBjvbj75qPdv0p/EgHBoEvhQRdDu+KrLmYqd
MrdlqKDgdycuPlCoY4NnZH1s5GX2T01CWmBawVRiz6SF+fbxZIOt/mxH4Wu0QX3f
SNlMSbYEiiKtwY52O+8xgui51wxFjIwe0kyXd1UUp0OsRAGn4DpXWKtE0JMTYd34
Xs5V2vMXFniCrniuzGoyyvmhOZXzywoOYsOLZMO28vfHXxKFeW5piC9v+JybQoBP
vYCx6+av7dYTkaNehfaWSPvuvQ2c37nJhkLLpzOh2ixiNxFGrIDpgF3C+7q0xObZ
UWoFmd5VfUdWLTteBYRx5DE4k14YAswI9CO5uO7oj2nKQjBCuPQjD32JGyQpgDDK
RZxBDQAUt906/jBj7WWgYzWOvC1ly7wAnshtd44+lIL4JT3NJBXzPnWlT58yqvqM
j11VoUMtwQ==
=qTVU
-----END PGP SIGNATURE-----

Merge tag 'block-6.3-2023-03-16' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A bit bigger than usual, as the NVMe pull request missed last week's
  submission. In detail:

  - NVMe pull request via Christoph:
      - Avoid potential UAF in nvmet_req_complete (Damien Le Moal)
      - More quirks (Elmer Miroslav Mosher Golovin, Philipp Geulen)
      - Fix a memory leak in the nvme-pci probe teardown path (Irvin Cote)
      - Repair the MAINTAINERS entry (Lukas Bulwahn)
      - Fix handling single range discard request (Ming Lei)
      - Show more opcode names in trace events (Minwoo Im)
      - Fix nvme-tcp timeout reporting (Sagi Grimberg)

  - MD pull request via Song:
      - Two fixes for old issues (Neil)
      - Resource leak in device stopping (Xiao)

  - Bio based device stats fix (Yu)

  - Kill unused CONFIG_BLOCK_COMPAT (Lukas)

  - sunvdc missing mdesc_grab() failure check (Liang)

  - Fix for reversal of request ordering upon issue for certain cases
    (Jan)

  - null_blk timeout fixes (Damien)

  - Loop use-after-free fix (Bart)

  - blk-mq SRCU fix for BLK_MQ_F_BLOCKING devices (Chris)"

* tag 'block-6.3-2023-03-16' of git://git.kernel.dk/linux:
  block: remove obsolete config BLOCK_COMPAT
  md: select BLOCK_LEGACY_AUTOLOAD
  block: count 'ios' and 'sectors' when io is done for bio-based device
  block: sunvdc: add check for mdesc_grab() returning NULL
  nvmet: avoid potential UAF in nvmet_req_complete()
  nvme-trace: show more opcode names
  nvme-tcp: add nvme-tcp pdu size build protection
  nvme-tcp: fix opcode reporting in the timeout handler
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Lexar NM620
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Netac NV3000
  nvme-pci: fixing memory leak in probe teardown path
  nvme: fix handling single range discard request
  MAINTAINERS: repair malformed T: entries in NVM EXPRESS DRIVERS
  block: null_blk: cleanup null_queue_rq()
  block: null_blk: Fix handling of fake timeout request
  blk-mq: fix "bad unlock balance detected" on q->srcu in __blk_mq_run_dispatch_ops
  loop: Fix use-after-free issues
  block: do not reverse request order when flushing plug list
  md: avoid signed overflow in slot_store()
  md: Free resources in __md_stop
commit 8d3c682a5e
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -14872,12 +14872,12 @@ M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
 S:	Supported
 W:	http://git.infradead.org/nvme.git
-T:	git://git.infradead.org/nvme.git
+T:	git git://git.infradead.org/nvme.git
 F:	Documentation/nvme/
-F:	drivers/nvme/host/
 F:	drivers/nvme/common/
-F:	include/linux/nvme.h
+F:	drivers/nvme/host/
 F:	include/linux/nvme-*.h
+F:	include/linux/nvme.h
 F:	include/uapi/linux/nvme_ioctl.h

 NVM EXPRESS FABRICS AUTHENTICATION
@@ -14912,7 +14912,7 @@ M:	Chaitanya Kulkarni <kch@nvidia.com>
 L:	linux-nvme@lists.infradead.org
 S:	Supported
 W:	http://git.infradead.org/nvme.git
-T:	git://git.infradead.org/nvme.git
+T:	git git://git.infradead.org/nvme.git
 F:	drivers/nvme/target/

 NVMEM FRAMEWORK
diff --git a/block/Kconfig b/block/Kconfig
@@ -204,9 +204,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK

 source "block/partitions/Kconfig"

-config BLOCK_COMPAT
-	def_bool COMPAT
-
 config BLK_MQ_PCI
 	def_bool PCI
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -959,16 +959,11 @@ again:
 	}
 }

-unsigned long bdev_start_io_acct(struct block_device *bdev,
-				 unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
 				 unsigned long start_time)
 {
-	const int sgrp = op_stat_group(op);
-
 	part_stat_lock();
 	update_io_ticks(bdev, start_time, false);
-	part_stat_inc(bdev, ios[sgrp]);
-	part_stat_add(bdev, sectors[sgrp], sectors);
 	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
 	part_stat_unlock();
@@ -984,13 +979,12 @@ EXPORT_SYMBOL(bdev_start_io_acct);
  */
 unsigned long bio_start_io_acct(struct bio *bio)
 {
-	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
-				  bio_op(bio), jiffies);
+	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
 }
 EXPORT_SYMBOL_GPL(bio_start_io_acct);

 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
-		      unsigned long start_time)
+		      unsigned int sectors, unsigned long start_time)
 {
 	const int sgrp = op_stat_group(op);
 	unsigned long now = READ_ONCE(jiffies);
@@ -998,6 +992,8 @@ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,

 	part_stat_lock();
 	update_io_ticks(bdev, now, true);
+	part_stat_inc(bdev, ios[sgrp]);
+	part_stat_add(bdev, sectors[sgrp], sectors);
 	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
 	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
 	part_stat_unlock();
@@ -1007,7 +1003,7 @@ EXPORT_SYMBOL(bdev_end_io_acct);
 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
 		struct block_device *orig_bdev)
 {
-	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
+	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
 }
 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
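The blk-core.c hunks above move the ios[] and sectors[] updates from bdev_start_io_acct() to bdev_end_io_acct(), so a bio-based device only counts an I/O once it actually completes. A minimal sketch of how a bio-based driver would use the reworked interface (mydrv_submit_bio() is a hypothetical entry point, not part of this patch):

	static void mydrv_submit_bio(struct bio *bio)
	{
		unsigned long start;

		/* start of accounting: only in_flight and io_ticks move here */
		start = bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);

		/* ... perform the I/O ... */

		/* completion: ios[] and sectors[] are counted here now */
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
				 start);
		bio_endio(bio);
	}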
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -2725,6 +2725,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	struct blk_mq_hw_ctx *this_hctx = NULL;
 	struct blk_mq_ctx *this_ctx = NULL;
 	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
 	unsigned int depth = 0;
 	LIST_HEAD(list);

@@ -2735,10 +2736,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 			this_hctx = rq->mq_hctx;
 			this_ctx = rq->mq_ctx;
 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-			rq_list_add(&requeue_list, rq);
+			rq_list_add_tail(&requeue_lastp, rq);
 			continue;
 		}
-		list_add_tail(&rq->queuelist, &list);
+		list_add(&rq->queuelist, &list);
 		depth++;
 	} while (!rq_list_empty(plug->mq_list));
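plug->mq_list is filled by rq_list_add(), which pushes at the head, so walking it during a flush hands requests back newest-first. Building the dispatch list with list_add() (head insertion) re-reverses that order, and the new rq_list_add_tail() keeps requests bound for another hctx in order as well. A standalone illustration of the re-reversal (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int plug[] = {3, 2, 1};	/* submitted 1,2,3; stored newest-first */
		int list[3];
		int depth = 0;

		for (int i = 0; i < 3; i++) {
			/* list_add(): insert at the head of 'list' */
			for (int j = depth; j > 0; j--)
				list[j] = list[j - 1];
			list[0] = plug[i];
			depth++;
		}
		/* head insertion undoes the stack reversal */
		printf("%d %d %d\n", list[0], list[1], list[2]);	/* 1 2 3 */
		return 0;
	}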
diff --git a/block/blk-mq.h b/block/blk-mq.h
@@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
 do {								\
 	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
+		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
 		int srcu_idx;					\
 								\
 		might_sleep_if(check_sleep);			\
-		srcu_idx = srcu_read_lock((q)->tag_set->srcu);	\
+		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
 		(dispatch_ops);					\
-		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);	\
+		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
 	} else {						\
 		rcu_read_lock();				\
 		(dispatch_ops);					\
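The macro's q argument is expanded textually, so a caller along these lines (hypothetical, for illustration):

	blk_mq_run_dispatch_ops(rq->q,
			blk_mq_request_issue_directly(rq, true));

would re-evaluate rq->q->tag_set for the srcu_read_unlock() after dispatch_ops may have freed rq. Caching (q)->tag_set in __tag_set once, up front, guarantees the lock and unlock operate on the same srcu_struct, which is what lockdep's "bad unlock balance" check was tripping over.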
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,

 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
+	struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
+	struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	const bool write = op_is_write(req_op(rq));
 	struct loop_device *lo = rq->q->queuedata;
 	int ret = 0;
 	struct mem_cgroup *old_memcg = NULL;
+	const bool use_aio = cmd->use_aio;

 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
 		ret = -EIO;
 		goto failed;
 	}

-	if (cmd->blkcg_css)
-		kthread_associate_blkcg(cmd->blkcg_css);
-	if (cmd->memcg_css)
+	if (cmd_blkcg_css)
+		kthread_associate_blkcg(cmd_blkcg_css);
+	if (cmd_memcg_css)
 		old_memcg = set_active_memcg(
-				mem_cgroup_from_css(cmd->memcg_css));
+				mem_cgroup_from_css(cmd_memcg_css));

+	/*
+	 * do_req_filebacked() may call blk_mq_complete_request() synchronously
+	 * or asynchronously if using aio. Hence, do not touch 'cmd' after
+	 * do_req_filebacked() has returned unless we are sure that 'cmd' has
+	 * not yet been completed.
+	 */
 	ret = do_req_filebacked(lo, rq);

-	if (cmd->blkcg_css)
+	if (cmd_blkcg_css)
 		kthread_associate_blkcg(NULL);

-	if (cmd->memcg_css) {
+	if (cmd_memcg_css) {
 		set_active_memcg(old_memcg);
-		css_put(cmd->memcg_css);
+		css_put(cmd_memcg_css);
 	}
  failed:
 	/* complete non-aio request */
-	if (!cmd->use_aio || ret) {
+	if (!use_aio || ret) {
 		if (ret == -EOPNOTSUPP)
 			cmd->ret = ret;
 		else
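The loop fix is the usual remedy for completion races: snapshot every field of cmd needed after do_req_filebacked(), since an aio completion may free cmd underneath us. Reduced to its essence as a standalone sketch (placeholder types and functions, not loop driver code):

	#include <stdbool.h>
	#include <stdlib.h>

	struct cmd {
		bool use_aio;
		int ret;
	};

	/* Stand-in for do_req_filebacked(): on the aio path the command may
	 * already be completed (and freed) by the time this returns. On an
	 * early error it returns nonzero without completing anything. */
	static int do_io(struct cmd *cmd)
	{
		if (cmd->use_aio) {
			free(cmd);		/* async completion frees 'cmd' */
			return 0;
		}
		return 0;			/* sync path: 'cmd' still alive */
	}

	static void handle(struct cmd *cmd)
	{
		const bool use_aio = cmd->use_aio;	/* snapshot first */
		int ret = do_io(cmd);			/* may free 'cmd' */

		if (!use_aio || ret)	/* tests the snapshot, never freed memory */
			cmd->ret = ret;	/* reached only when 'cmd' was not freed */
	}

	int main(void)
	{
		struct cmd *c = calloc(1, sizeof(*c));

		handle(c);		/* sync path */
		free(c);
		return 0;
	}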
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
@@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
 	case NULL_IRQ_SOFTIRQ:
 		switch (cmd->nq->dev->queue_mode) {
 		case NULL_Q_MQ:
-			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
-				blk_mq_complete_request(cmd->rq);
+			blk_mq_complete_request(cmd->rq);
 			break;
 		case NULL_Q_BIO:
 			/*
@@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 }

 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
-			 const struct blk_mq_queue_data *bd)
+				  const struct blk_mq_queue_data *bd)
 {
-	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+	struct request *rq = bd->rq;
+	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	struct nullb_queue *nq = hctx->driver_data;
-	sector_t nr_sectors = blk_rq_sectors(bd->rq);
-	sector_t sector = blk_rq_pos(bd->rq);
+	sector_t nr_sectors = blk_rq_sectors(rq);
+	sector_t sector = blk_rq_pos(rq);
 	const bool is_poll = hctx->type == HCTX_TYPE_POLL;

 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
@@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		cmd->timer.function = null_cmd_timer_expired;
 	}
-	cmd->rq = bd->rq;
+	cmd->rq = rq;
 	cmd->error = BLK_STS_OK;
 	cmd->nq = nq;
-	cmd->fake_timeout = should_timeout_request(bd->rq);
+	cmd->fake_timeout = should_timeout_request(rq) ||
+			    blk_should_fake_timeout(rq->q);

-	blk_mq_start_request(bd->rq);
+	blk_mq_start_request(rq);

-	if (should_requeue_request(bd->rq)) {
+	if (should_requeue_request(rq)) {
 		/*
 		 * Alternate between hitting the core BUSY path, and the
 		 * driver driven requeue path
@@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nq->requeue_selection++;
 		if (nq->requeue_selection & 1)
 			return BLK_STS_RESOURCE;
-		else {
-			blk_mq_requeue_request(bd->rq, true);
-			return BLK_STS_OK;
-		}
+		blk_mq_requeue_request(rq, true);
+		return BLK_STS_OK;
 	}

 	if (is_poll) {
 		spin_lock(&nq->poll_lock);
-		list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+		list_add_tail(&rq->queuelist, &nq->poll_list);
 		spin_unlock(&nq->poll_lock);
 		return BLK_STS_OK;
 	}
 	if (cmd->fake_timeout)
 		return BLK_STS_OK;

-	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+	return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
 }

 static void cleanup_queue(struct nullb_queue *nq)
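With the two null_blk hunks above, fake timeouts are decided once at submission time instead of being filtered deep in the completion path, where a dropped completion could leave the request without a proper error. The intended flow after this change, as a rough sketch (paraphrased, not verbatim driver code):

	/*
	 * null_queue_rq()
	 *     cmd->fake_timeout = should_timeout_request(rq) ||
	 *                         blk_should_fake_timeout(rq->q);
	 *     blk_mq_start_request(rq);     arms the block-layer request timer
	 *     if (cmd->fake_timeout)
	 *             return BLK_STS_OK;    never executed, never completed...
	 *
	 * ...so the request timer eventually expires and
	 * null_timeout_rq() ends the request with an error status.
	 */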
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	print_version();

 	hp = mdesc_grab();
+	if (!hp)
+		return -ENODEV;

 	err = -ENODEV;
 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
@@ -15,6 +15,10 @@ if MD
 config BLK_DEV_MD
 	tristate "RAID support"
 	select BLOCK_HOLDER_DEPRECATED if SYSFS
+	# BLOCK_LEGACY_AUTOLOAD requirement should be removed
+	# after relevant mdadm enhancements - to make "names=yes"
+	# the default - are widely available.
+	select BLOCK_LEGACY_AUTOLOAD
 	help
 	  This driver lets you combine several hard disk partitions into one
 	  logical block device. This can be used to simply append one
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end)
 		sectors = io->sectors;

 	if (!end)
-		bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
-				   start_time);
+		bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
 	else
-		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
+				 start_time);

 	if (static_branch_unlikely(&stats_enabled) &&
 	    unlikely(dm_stats_used(&md->stats))) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
@@ -3128,6 +3128,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
 		if (err < 0)
 			return err;
+		if (slot < 0)
+			/* overflow */
+			return -ENOSPC;
 	}
 	if (rdev->mddev->pers && slot == -1) {
 		/* Setting 'slot' on an active array requires also
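slot is a signed int, but the text is parsed with kstrtouint() through a cast, so an input like "4294967295" parses cleanly and lands in slot as -1, a value the function otherwise treats as "remove the slot". The added check turns that overflow into -ENOSPC. A standalone demonstration of the wraparound (plain C, 32-bit int assumed):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int slot;

		/* mirrors: kstrtouint(buf, 10, (unsigned int *)&slot) */
		*(unsigned int *)&slot = strtoul("4294967295", NULL, 10);

		printf("slot = %d\n", slot);	/* prints: slot = -1 */
		if (slot < 0)			/* the added overflow check */
			return 1;		/* slot_store() returns -ENOSPC */
		return 0;
	}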
@@ -6256,6 +6259,11 @@ static void __md_stop(struct mddev *mddev)
 	mddev->to_remove = &md_redundancy_group;
 	module_put(pers->owner);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+	percpu_ref_exit(&mddev->writes_pending);
+	percpu_ref_exit(&mddev->active_io);
+	bioset_exit(&mddev->bio_set);
+	bioset_exit(&mddev->sync_set);
 }

 void md_stop(struct mddev *mddev)
@@ -6265,10 +6273,6 @@ void md_stop(struct mddev *mddev)
 	 */
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
-	percpu_ref_exit(&mddev->writes_pending);
-	percpu_ref_exit(&mddev->active_io);
-	bioset_exit(&mddev->bio_set);
-	bioset_exit(&mddev->sync_set);
 }

 EXPORT_SYMBOL_GPL(md_stop);
@@ -7839,11 +7843,6 @@ static void md_free_disk(struct gendisk *disk)
 {
 	struct mddev *mddev = disk->private_data;

-	percpu_ref_exit(&mddev->writes_pending);
-	percpu_ref_exit(&mddev->active_io);
-	bioset_exit(&mddev->bio_set);
-	bioset_exit(&mddev->sync_set);
-
 	mddev_free(mddev);
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		range = page_address(ns->ctrl->discard_page);
 	}

-	__rq_for_each_bio(bio, req) {
-		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
-		if (n < segments) {
-			range[n].cattr = cpu_to_le32(0);
-			range[n].nlb = cpu_to_le32(nlb);
-			range[n].slba = cpu_to_le64(slba);
+	if (queue_max_discard_segments(req->q) == 1) {
+		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+		range[0].cattr = cpu_to_le32(0);
+		range[0].nlb = cpu_to_le32(nlb);
+		range[0].slba = cpu_to_le64(slba);
+		n = 1;
+	} else {
+		__rq_for_each_bio(bio, req) {
+			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+			if (n < segments) {
+				range[n].cattr = cpu_to_le32(0);
+				range[n].nlb = cpu_to_le32(nlb);
+				range[n].slba = cpu_to_le64(slba);
+			}
+			n++;
 		}
-		n++;
 	}

 	if (WARN_ON_ONCE(n != segments)) {
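Discards merged on a queue that advertises a single discard segment produce one request spanning several bios; iterating those bios then yields more ranges than the device supports and trips the WARN_ON_ONCE(n != segments) at the end of the hunk. A worked example with hypothetical numbers:

	/* Two contiguous discard bios, sectors [0,8) and [8,16), merged into
	 * one request; queue_max_discard_segments(req->q) == 1, segments == 1.
	 *
	 * Old loop:   one range per bio -> n == 2 != segments, WARN + error.
	 * New branch: slba = blk_rq_pos(req) = 0,
	 *             nlb  = blk_rq_sectors(req) >> (lba_shift - 9) = 16
	 *             (for 512-byte LBAs, lba_shift == 9), and n == 1.
	 */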
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
@@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq)
 		return;

 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
-	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
-					blk_rq_bytes(rq) >> SECTOR_SHIFT,
-					req_op(rq), jiffies);
+	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
+						      jiffies);
 }
 EXPORT_SYMBOL_GPL(nvme_mpath_start_request);

@@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq)
 	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
 		return;
 	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
-		nvme_req(rq)->start_time);
+			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
+			 nvme_req(rq)->start_time);
 }

 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -3073,6 +3073,7 @@ out_dev_unmap:
 	nvme_dev_unmap(dev);
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&dev->ctrl);
+	nvme_put_ctrl(&dev->ctrl);
 	return result;
 }

@@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x2646, 0x501E),	/* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+	{ PCI_DEVICE(0x1f40, 0x1202),	/* Netac Technologies Co. NV3000 NVMe SSD */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1f40, 0x5236),	/* Netac Technologies Co. NV7000 NVMe SSD */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1e4B, 0x1001),	/* MAXIO MAP1001 */
@@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2263),	/* Lexar NM610 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1d97, 0x1d97),	/* Lexar NM620 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2269),	/* Lexar NM760 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }

+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+	return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+	/* use the pdu space in the back for the data pdu */
+	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+		sizeof(struct nvme_tcp_data_pdu);
+}
+
 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
 	if (nvme_is_fabrics(req->req.cmd))
@@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,

 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
 {
-	struct nvme_tcp_data_pdu *data = req->pdu;
+	struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
 	struct nvme_tcp_queue *queue = req->queue;
 	struct request *rq = blk_mq_rq_from_pdu(req);
 	u32 h2cdata_sent = req->pdu_len;
@@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	bool inline_data = nvme_tcp_has_inline_data(req);
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) + hdgst - req->offset;
@@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
-	struct nvme_tcp_data_pdu *pdu = req->pdu;
+	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) - req->offset + hdgst;
 	int ret;
@@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
 	int qid = nvme_tcp_queue_id(req->queue);

@@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 			struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	struct nvme_command *c = &pdu->cmd;

 	c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 			struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	struct nvme_tcp_queue *queue = req->queue;
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
 	blk_status_t ret;
@@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {

 static int __init nvme_tcp_init_module(void)
 {
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!nvme_tcp_wq)
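The helpers added at the top of tcp.c encode the PDU layout in one place: each request allocates space for the largest PDU (the 72-byte command PDU), and the 24-byte data PDU is carved out of the tail of that same buffer. The BUILD_BUG_ON()s pin the on-wire sizes so a future struct change cannot silently break this arithmetic. A standalone sketch of the layout, with sizes mirroring the assertions above:

	#include <assert.h>
	#include <stdio.h>

	#define CMD_PDU_SIZE	72	/* sizeof(struct nvme_tcp_cmd_pdu) */
	#define DATA_PDU_SIZE	24	/* sizeof(struct nvme_tcp_data_pdu) */

	int main(void)
	{
		unsigned char pdu[CMD_PDU_SIZE];	/* per-request PDU buffer */

		/* nvme_tcp_req_data_pdu(): take the data PDU from the tail */
		unsigned char *data_pdu = pdu + CMD_PDU_SIZE - DATA_PDU_SIZE;

		assert(data_pdu + DATA_PDU_SIZE == pdu + CMD_PDU_SIZE);
		printf("data PDU at offset %td of %d\n",
		       data_pdu - pdu, CMD_PDU_SIZE);	/* offset 48 of 72 */
		return 0;
	}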
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)

 void nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+	struct nvmet_sq *sq = req->sq;
+
 	__nvmet_req_complete(req, status);
-	percpu_ref_put(&req->sq->ref);
+	percpu_ref_put(&sq->ref);
 }
 EXPORT_SYMBOL_GPL(nvmet_req_complete);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
@@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req)
 	*(listptr) = rq;	\
 } while (0)

+#define rq_list_add_tail(lastpptr, rq)	do {		\
+	(rq)->rq_next = NULL;				\
+	**(lastpptr) = rq;				\
+	*(lastpptr) = &rq->rq_next;			\
+} while (0)
+
 #define rq_list_pop(listptr)				\
 ({							\
 	struct request *__req = NULL;			\
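rq_list_add_tail() appends in O(1) by tracking a pointer to the last next-slot instead of walking the list; together with the blk-mq.c change it keeps flushed requests in submission order. The same trick outside the kernel (standalone C; struct req and the 'next' member are illustrative stand-ins, and the macro takes &lastp, hence its extra level of indirection):

	#include <stdio.h>
	#include <stdlib.h>

	struct req {
		int id;
		struct req *next;
	};

	int main(void)
	{
		struct req *head = NULL;
		struct req **lastp = &head;	/* points at the slot to fill next */

		for (int id = 1; id <= 3; id++) {
			struct req *rq = malloc(sizeof(*rq));

			rq->id = id;
			rq->next = NULL;	/* (rq)->rq_next = NULL       */
			*lastp = rq;		/* **(lastpptr) = rq          */
			lastp = &rq->next;	/* *(lastpptr) = &rq->rq_next */
		}
		for (struct req *rq = head; rq; rq = rq->next)
			printf("%d ", rq->id);	/* prints: 1 2 3 */
		printf("\n");
		return 0;
	}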
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -1446,11 +1446,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
 		wake_up_process(waiter);
 }

-unsigned long bdev_start_io_acct(struct block_device *bdev,
-				 unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
 				 unsigned long start_time);
 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
-		      unsigned long start_time);
+		      unsigned int sectors, unsigned long start_time);

 unsigned long bio_start_io_acct(struct bio *bio);
 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
@@ -812,6 +812,7 @@ enum nvme_opcode {
 		nvme_opcode_name(nvme_cmd_compare),		\
 		nvme_opcode_name(nvme_cmd_write_zeroes),	\
 		nvme_opcode_name(nvme_cmd_dsm),			\
+		nvme_opcode_name(nvme_cmd_verify),		\
 		nvme_opcode_name(nvme_cmd_resv_register),	\
 		nvme_opcode_name(nvme_cmd_resv_report),		\
 		nvme_opcode_name(nvme_cmd_resv_acquire),	\
@@ -1144,10 +1145,14 @@ enum nvme_admin_opcode {
 		nvme_admin_opcode_name(nvme_admin_ns_mgmt),		\
 		nvme_admin_opcode_name(nvme_admin_activate_fw),		\
 		nvme_admin_opcode_name(nvme_admin_download_fw),		\
+		nvme_admin_opcode_name(nvme_admin_dev_self_test),	\
 		nvme_admin_opcode_name(nvme_admin_ns_attach),		\
 		nvme_admin_opcode_name(nvme_admin_keep_alive),		\
 		nvme_admin_opcode_name(nvme_admin_directive_send),	\
 		nvme_admin_opcode_name(nvme_admin_directive_recv),	\
+		nvme_admin_opcode_name(nvme_admin_virtual_mgmt),	\
+		nvme_admin_opcode_name(nvme_admin_nvme_mi_send),	\
+		nvme_admin_opcode_name(nvme_admin_nvme_mi_recv),	\
 		nvme_admin_opcode_name(nvme_admin_dbbuf),		\
 		nvme_admin_opcode_name(nvme_admin_format_nvm),		\
 		nvme_admin_opcode_name(nvme_admin_security_send),	\