block-5.8-2020-06-11
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl7ioawQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpvbJD/wNLN/H4yIQ7tU5XDdvxvpx/u9FC1t2Pep0
w/olj6wnrsHw/WsgJIlw7efTq9QATfszG/dJKJiBGdiJoCKE1TW/CM6RNfDJb4Z3
TUa9ghYYzcfI2NRdV94Ol9qRThjB6OG6Cdw4k3oKbx44EJOzgatBI6xIA3nU+f/L
XO+xl2z3+t28guMvcgUkdJsR8GvSrwcXCvw3X/3uqbtAv5hhMbR7jyqxcHDLX72t
I+y3/dWfKaienujEmcLKeW+f2RFyjYIvDbQ5b/JDqLah7Fn1A2wYf+mx7iZuQZSi
5nwGcPuj++8GXS6G8JegAl+s5L3AyBNdz5nrxdAlRjDTMgIUstFgueLnCaW64QNF
93kWK5gDwhq+26AFl3mGJ3m+qhh1AhGWaVniBiFA3OUeWcOgVGlRf6jtmWazQaEI
v15WTiAXTsQujnV+t5KYKQnm9vJLIcc/njiSss1JXnqrxR6fH+QCHQ96ckTCqx66
0GbN5RkuC2J/RHYEyYnYIJlNZGDsCVoBC3QR10WNlng82cxMyrahS011xUTn9VN+
0Gnz1ilNFc+bx1jUO+pl6EdIsEBbFkKioyoZsgba5mvM+Nn3nGbvqQPJc+18fSV2
BW1x2yuoc6yjwuol9NMV+cy13Z9u+uA4c0mFIetjuyjE3rZb77iuIiIKVWMRh6Av
Ip6GuPEA2A==
=TOc1
-----END PGP SIGNATURE-----

Merge tag 'block-5.8-2020-06-11' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Some followup fixes for this merge window. In particular:

   - Seqcount write missing preemption disable for stats (Ahmed)

   - blktrace fixes (Chaitanya)

   - Redundant initializations (Colin)

   - Various small NVMe fixes (Chaitanya, Christoph, Daniel, Max,
     Niklas, Rikard)

   - loop flag bug regression fix (Martijn)

   - blk-mq tagging fixes (Christoph, Ming)"

* tag 'block-5.8-2020-06-11' of git://git.kernel.dk/linux-block:
  umem: remove redundant initialization of variable ret
  pktcdvd: remove redundant initialization of variable ret
  nvmet: fail outstanding host posted AEN req
  nvme-pci: use simple suspend when a HMB is enabled
  nvme-fc: don't call nvme_cleanup_cmd() for AENs
  nvmet-tcp: constify nvmet_tcp_ops
  nvme-tcp: constify nvme_tcp_mq_ops and nvme_tcp_admin_mq_ops
  nvme: do not call del_gendisk() on a disk that was never added
  blk-mq: fix blk_mq_all_tag_iter
  blk-mq: split out a __blk_mq_get_driver_tag helper
  blktrace: fix endianness for blk_log_remap()
  blktrace: fix endianness in get_pdu_int()
  blktrace: use errno instead of bi_status
  block: nr_sects_write(): Disable preemption on seqcount write
  block: remove the error argument to the block_bio_complete tracepoint
  loop: Fix wrong masking of status flags
  block/bio-integrity: don't free 'buf' if bio_integrity_add_page() failed
commit a58dfea297
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
@@ -281,7 +281,6 @@ bool bio_integrity_prep(struct bio *bio)
 
 		if (ret == 0) {
 			printk(KERN_ERR "could not attach integrity payload\n");
-			kfree(buf);
 			status = BLK_STS_RESOURCE;
 			goto err_end_io;
 		}
diff --git a/block/bio.c b/block/bio.c
@@ -1434,8 +1434,7 @@ again:
 	}
 
 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_complete(bio->bi_disk->queue, bio,
-					 blk_status_to_errno(bio->bi_status));
+		trace_block_bio_complete(bio->bi_disk->queue, bio);
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
@@ -191,6 +191,33 @@ found_tag:
 	return tag + tag_offset;
 }
 
+bool __blk_mq_get_driver_tag(struct request *rq)
+{
+	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
+	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
+	bool shared = blk_mq_tag_busy(rq->mq_hctx);
+	int tag;
+
+	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
+		bt = &rq->mq_hctx->tags->breserved_tags;
+		tag_offset = 0;
+	}
+
+	if (!hctx_may_queue(rq->mq_hctx, bt))
+		return false;
+	tag = __sbitmap_queue_get(bt);
+	if (tag == BLK_MQ_NO_TAG)
+		return false;
+
+	rq->tag = tag + tag_offset;
+	if (shared) {
+		rq->rq_flags |= RQF_MQ_INFLIGHT;
+		atomic_inc(&rq->mq_hctx->nr_active);
+	}
+	rq->mq_hctx->tags->rqs[rq->tag] = rq;
+	return true;
+}
+
 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 		    unsigned int tag)
 {
@@ -269,6 +296,7 @@ struct bt_tags_iter_data {
 
 #define BT_TAG_ITER_RESERVED		(1 << 0)
 #define BT_TAG_ITER_STARTED		(1 << 1)
+#define BT_TAG_ITER_STATIC_RQS		(1 << 2)
 
 static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
@@ -282,9 +310,12 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
-	 * test and set the bit before assining ->rqs[].
+	 * test and set the bit before assigning ->rqs[].
 	 */
-	rq = tags->rqs[bitnr];
+	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
+		rq = tags->static_rqs[bitnr];
+	else
+		rq = tags->rqs[bitnr];
 	if (!rq)
 		return true;
 	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
@@ -339,11 +370,13 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
  *		indicates whether or not @rq is a reserved request. Return
  *		true to continue iterating tags, false to stop.
  * @priv:	Will be passed as second argument to @fn.
+ *
+ * Caller has to pass the tag map from which requests are allocated.
  */
 void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 		void *priv)
 {
-	return __blk_mq_all_tag_iter(tags, fn, priv, 0);
+	return __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
 }
 
 /**
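Note on the BT_TAG_ITER_STATIC_RQS fix: tag allocation sets the sbitmap bit before assigning tags->rqs[bitnr], so an iterator reading ->rqs[] can miss a just-allocated request. Reading tags->static_rqs[] instead gives blk_mq_all_tag_iter() a stable view of every allocated tag. A minimal usage sketch follows; count_rq() and count_all_tags() are hypothetical names, while blk_mq_all_tag_iter() and the busy_tag_iter_fn signature are the in-tree API of this kernel.

#include <linux/blk-mq.h>
#include "blk-mq-tag.h"	/* block-private header declaring blk_mq_all_tag_iter() */

/* Hypothetical callback, invoked once per allocated request. */
static bool count_rq(struct request *rq, void *data, bool reserved)
{
	unsigned int *count = data;

	(*count)++;
	return true;		/* returning true keeps the iteration going */
}

/* Count every allocated request in a tag map, started or not. */
static unsigned int count_all_tags(struct blk_mq_tags *tags)
{
	unsigned int count = 0;

	blk_mq_all_tag_iter(tags, count_rq, &count);
	return count;
}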
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
@@ -51,6 +51,14 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
+bool __blk_mq_get_driver_tag(struct request *rq);
+static inline bool blk_mq_get_driver_tag(struct request *rq)
+{
+	if (rq->tag != BLK_MQ_NO_TAG)
+		return true;
+	return __blk_mq_get_driver_tag(rq);
+}
+
 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -1052,35 +1052,6 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq)
-{
-	struct blk_mq_alloc_data data = {
-		.q = rq->q,
-		.hctx = rq->mq_hctx,
-		.flags = BLK_MQ_REQ_NOWAIT,
-		.cmd_flags = rq->cmd_flags,
-	};
-	bool shared;
-
-	if (rq->tag != BLK_MQ_NO_TAG)
-		return true;
-
-	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
-		data.flags |= BLK_MQ_REQ_RESERVED;
-
-	shared = blk_mq_tag_busy(data.hctx);
-	rq->tag = blk_mq_get_tag(&data);
-	if (rq->tag >= 0) {
-		if (shared) {
-			rq->rq_flags |= RQF_MQ_INFLIGHT;
-			atomic_inc(&data.hctx->nr_active);
-		}
-		data.hctx->tags->rqs[rq->tag] = rq;
-	}
-
-	return rq->tag != BLK_MQ_NO_TAG;
-}
-
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 				int flags, void *key)
 {
diff --git a/block/blk-mq.h b/block/blk-mq.h
@@ -44,7 +44,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 				bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
 
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
@@ -420,9 +420,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
 static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	preempt_disable();
 	write_seqcount_begin(&part->nr_sects_seq);
 	part->nr_sects = size;
 	write_seqcount_end(&part->nr_sects_seq);
+	preempt_enable();
 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	preempt_disable();
 	part->nr_sects = size;
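Background for the part_nr_sects_write() change: on 32-bit SMP a sector_t store can tear, so readers retry on a seqcount. If the writer is preempted between write_seqcount_begin() and write_seqcount_end(), the sequence stays odd and a reader scheduled on that CPU can spin indefinitely, which is why the write side must run with preemption off. A minimal sketch of the pattern, assuming hypothetical demo_seq/demo_value names but the real seqlock.h API:

#include <linux/seqlock.h>
#include <linux/preempt.h>
#include <linux/types.h>

static seqcount_t demo_seq;
static u64 demo_value;		/* a 64-bit store tears on 32-bit hardware */

static void demo_write(u64 v)
{
	preempt_disable();	/* writer must not be preempted mid-update */
	write_seqcount_begin(&demo_seq);
	demo_value = v;
	write_seqcount_end(&demo_seq);
	preempt_enable();
}

static u64 demo_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&demo_seq);
		v = demo_value;
	} while (read_seqcount_retry(&demo_seq, seq));	/* retries while a write is in flight */
	return v;
}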
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -1390,7 +1390,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 		goto out_unfreeze;
 
 	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
-	lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
 	/* For those flags, use the previous values instead */
 	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
 	/* For flags that can't be cleared, use previous values too */
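The loop fix above is a one-character mask inversion. A small worked example with hypothetical flag values shows the difference: with a settable mask of 0x3, previous flags of 0xC and a request of 0x5, the fixed code yields 0xD, while the buggy '&= ~mask' form yielded 0x4 and dropped exactly the bits LOOP_SET_STATUS is allowed to change.

#define DEMO_SETTABLE_FLAGS	0x3u	/* hypothetical: bits user space may change */

/* prev = 0xC (kernel-owned bits), requested = 0x5 -> returns 0xD.
 * The buggy "requested & ~DEMO_SETTABLE_FLAGS" gave 0x4 instead,
 * discarding the one settable flag the caller asked for. */
static unsigned int demo_merge_flags(unsigned int prev, unsigned int requested)
{
	unsigned int flags = requested & DEMO_SETTABLE_FLAGS;	/* honour settable bits */

	return flags | (prev & ~DEMO_SETTABLE_FLAGS);		/* carry kernel-owned bits over */
}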
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
@@ -1613,7 +1613,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
 	disc_information di;
 	track_information ti;
 	__u32 last_track;
-	int ret = -1;
+	int ret;
 
 	ret = pkt_get_disc_info(pd, &di);
 	if (ret)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
@@ -784,7 +784,7 @@ static const struct block_device_operations mm_fops = {
 
 static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int ret = -ENODEV;
+	int ret;
 	struct cardinfo *card = &cards[num_cards];
 	unsigned char	mem_present;
 	unsigned char	batt_status;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -3669,7 +3669,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->disk = disk;
 
 	if (__nvme_revalidate_disk(disk, id))
-		goto out_free_disk;
+		goto out_put_disk;
 
 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
 		ret = nvme_nvm_register(ns, disk_name, node);
@@ -3696,8 +3696,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	/* prevent double queue cleanup */
 	ns->disk->queue = NULL;
 	put_disk(ns->disk);
- out_free_disk:
-	del_gendisk(ns->disk);
 out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
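The nvme_alloc_ns() fix enforces a general gendisk rule: del_gendisk() is only valid for a disk that went through device_add_disk(); before that point, put_disk() alone releases it. A hypothetical probe-style sketch of the corrected unwind (demo_probe() and demo_validate() are stand-in names; the genhd calls are real):

#include <linux/genhd.h>

/* Hypothetical validation step standing in for __nvme_revalidate_disk(). */
static bool demo_validate(struct gendisk *disk)
{
	return false;	/* stub: pretend validation succeeded */
}

static int demo_probe(struct device *dev, int node)
{
	struct gendisk *disk = alloc_disk_node(0, node);

	if (!disk)
		return -ENOMEM;

	if (demo_validate(disk))
		goto out_put_disk;	/* disk never added: drop the ref only */

	device_add_disk(dev, disk, NULL);
	return 0;			/* from here on, teardown needs del_gendisk() */

out_put_disk:
	put_disk(disk);			/* frees the not-yet-added disk */
	return -EIO;
}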
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
@@ -2634,10 +2634,11 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
 	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
-	if (!(op->flags & FCOP_FLAGS_AEN))
+	if (!(op->flags & FCOP_FLAGS_AEN)) {
 		nvme_fc_unmap_data(ctrl, op->rq, op);
+		nvme_cleanup_cmd(op->rq);
+	}
 
-	nvme_cleanup_cmd(op->rq);
 	nvme_fc_ctrl_put(ctrl);
 
 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -599,8 +599,7 @@ static inline void nvme_trace_bio_complete(struct request *req,
 	struct nvme_ns *ns = req->q->queuedata;
 
 	if (req->cmd_flags & REQ_NVME_MPATH)
-		trace_block_bio_complete(ns->head->disk->queue,
-					 req->bio, status);
+		trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
 extern struct device_attribute dev_attr_ana_grpid;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -2950,9 +2950,15 @@ static int nvme_suspend(struct device *dev)
 	 * the PCI bus layer to put it into D3 in order to take the PCIe link
 	 * down, so as to allow the platform to achieve its minimum low-power
 	 * state (which may not be possible if the link is up).
+	 *
+	 * If a host memory buffer is enabled, shut down the device as the NVMe
+	 * specification allows the device to access the host memory buffer in
+	 * host DRAM from all power states, but hosts will fail access to DRAM
+	 * during S3.
 	 */
 	if (pm_suspend_via_firmware() || !ctrl->npss ||
 	    !pcie_aspm_enabled(pdev) ||
+	    ndev->nr_host_mem_descs ||
 	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
 		return nvme_disable_prepare_reset(ndev, true);
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -131,8 +131,8 @@ struct nvme_tcp_ctrl {
 static LIST_HEAD(nvme_tcp_ctrl_list);
 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
 static struct workqueue_struct *nvme_tcp_wq;
-static struct blk_mq_ops nvme_tcp_mq_ops;
-static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
 
 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
@@ -2301,7 +2301,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
 	return queue->nr_cqe;
 }
 
-static struct blk_mq_ops nvme_tcp_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
 	.queue_rq	= nvme_tcp_queue_rq,
 	.complete	= nvme_complete_rq,
 	.init_request	= nvme_tcp_init_request,
@@ -2312,7 +2312,7 @@ static struct blk_mq_ops nvme_tcp_mq_ops = {
 	.poll		= nvme_tcp_poll,
 };
 
-static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
 	.queue_rq	= nvme_tcp_queue_rq,
 	.complete	= nvme_complete_rq,
 	.init_request	= nvme_tcp_init_request,
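The constifications in this series all follow one pattern: an ops table that is never written after initialization can be declared const, so the linker places it in .rodata, where a stray write faults instead of silently corrupting function pointers. A minimal sketch, with a hypothetical stub handler standing in for nvme_tcp_queue_rq():

#include <linux/blk-mq.h>

/* Hypothetical stub; the real table points at nvme_tcp_queue_rq(). */
static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	return BLK_STS_OK;	/* stub: pretend the request was queued */
}

/* Read-only after build time: ends up in .rodata. */
static const struct blk_mq_ops demo_mq_ops = {
	.queue_rq	= demo_queue_rq,
};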
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -129,7 +129,22 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
+{
+	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+	struct nvmet_req *req;
+
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds) {
+		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, status);
+		mutex_lock(&ctrl->lock);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
 {
 	struct nvmet_async_event *aen;
 	struct nvmet_req *req;
@@ -139,15 +154,14 @@ static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
 		aen = list_first_entry(&ctrl->async_events,
 				       struct nvmet_async_event, entry);
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
-		if (status == 0)
-			nvmet_set_result(req, nvmet_async_event_result(aen));
+		nvmet_set_result(req, nvmet_async_event_result(aen));
 
 		list_del(&aen->entry);
 		kfree(aen);
 
 		mutex_unlock(&ctrl->lock);
 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
-		nvmet_req_complete(req, status);
+		nvmet_req_complete(req, 0);
 		mutex_lock(&ctrl->lock);
 	}
 	mutex_unlock(&ctrl->lock);
@@ -170,7 +184,7 @@ static void nvmet_async_event_work(struct work_struct *work)
 	struct nvmet_ctrl *ctrl =
 		container_of(work, struct nvmet_ctrl, async_event_work);
 
-	nvmet_async_events_process(ctrl, 0);
+	nvmet_async_events_process(ctrl);
 }
 
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -779,7 +793,6 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
-	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
 	struct nvmet_ctrl *ctrl = sq->ctrl;
 
 	/*
@@ -787,7 +800,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 * queue doesn't have outstanding requests on it.
 	 */
 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
-		nvmet_async_events_process(ctrl, status);
+		nvmet_async_events_failall(ctrl);
 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
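Both nvmet helpers above share one locking idiom: pop one entry while holding ctrl->lock, then drop the lock around nvmet_req_complete(), since completion can call back into code that takes the lock again. A generic sketch of that drain loop, with hypothetical demo_* names standing in for the AEN array and nvmet_req_complete():

#include <linux/mutex.h>

struct demo_req;				/* hypothetical request type */
void demo_complete(struct demo_req *req);	/* hypothetical completion hook */

struct demo_ctrl {
	struct mutex lock;
	unsigned int nr_pending;
	struct demo_req *pending[4];
};

static void demo_drain(struct demo_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	while (ctrl->nr_pending) {
		struct demo_req *req = ctrl->pending[--ctrl->nr_pending];

		/* drop the lock: completion may sleep or re-enter it */
		mutex_unlock(&ctrl->lock);
		demo_complete(req);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}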
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
@@ -153,7 +153,7 @@ static LIST_HEAD(nvmet_tcp_queue_list);
 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
 
 static struct workqueue_struct *nvmet_tcp_wq;
-static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
 
@@ -1713,7 +1713,7 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
 	}
 }
 
-static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
 	.owner		= THIS_MODULE,
 	.type		= NVMF_TRTYPE_TCP,
 	.msdbd		= 1,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
@@ -261,9 +261,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+	TP_PROTO(struct request_queue *q, struct bio *bio),
 
-	TP_ARGS(q, bio, error),
+	TP_ARGS(q, bio),
 
 	TP_STRUCT__entry(
 		__field( dev_t,		dev			)
@@ -277,7 +277,7 @@ TRACE_EVENT(block_bio_complete,
 		__entry->dev		= bio_dev(bio);
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		__entry->error		= error;
+		__entry->error		= blk_status_to_errno(bio->bi_status);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
@@ -885,10 +885,10 @@ static void blk_add_trace_bio_bounce(void *ignore,
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio,
-				       int error)
+				       struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
+			  blk_status_to_errno(bio->bi_status));
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -995,8 +995,10 @@ static void blk_add_trace_split(void *ignore,
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
-				&rpdu, blk_trace_bio_get_cgid(q, bio));
+				BLK_TA_SPLIT,
+				blk_status_to_errno(bio->bi_status),
+				sizeof(rpdu), &rpdu,
+				blk_trace_bio_get_cgid(q, bio));
 	}
 	rcu_read_unlock();
 }
@@ -1033,7 +1035,8 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
+			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+			blk_status_to_errno(bio->bi_status),
 			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
 	rcu_read_unlock();
 }
@@ -1253,21 +1256,10 @@ static inline __u16 t_error(const struct trace_entry *ent)
 
 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
 {
-	const __u64 *val = pdu_start(ent, has_cg);
+	const __be64 *val = pdu_start(ent, has_cg);
 	return be64_to_cpu(*val);
 }
 
-static void get_pdu_remap(const struct trace_entry *ent,
-			  struct blk_io_trace_remap *r, bool has_cg)
-{
-	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
-	__u64 sector_from = __r->sector_from;
-
-	r->device_from = be32_to_cpu(__r->device_from);
-	r->device_to   = be32_to_cpu(__r->device_to);
-	r->sector_from = be64_to_cpu(sector_from);
-}
-
 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
 	bool has_cg);
 
@@ -1407,13 +1399,13 @@ static void blk_log_with_error(struct trace_seq *s,
 
 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
 {
-	struct blk_io_trace_remap r = { .device_from = 0, };
+	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
 
-	get_pdu_remap(ent, &r, has_cg);
 	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
 			 t_sector(ent), t_sec(ent),
-			 MAJOR(r.device_from), MINOR(r.device_from),
-			 (unsigned long long)r.sector_from);
+			 MAJOR(be32_to_cpu(__r->device_from)),
+			 MINOR(be32_to_cpu(__r->device_from)),
+			 be64_to_cpu(__r->sector_from));
 }
 
 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
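The two blktrace endianness fixes are as much sparse-annotation fixes as behaviour fixes: remap and integer PDUs are stored big-endian, and typing the pointer __u64 instead of __be64 hides a missing or doubled byte swap from sparse. A minimal sketch of the corrected decode, with a hypothetical pdu_to_host() wrapper around the pattern get_pdu_int() now uses:

#include <linux/types.h>
#include <asm/byteorder.h>

static u64 pdu_to_host(const void *pdu)
{
	const __be64 *val = pdu;	/* annotate the on-disk/wire format for sparse */

	return be64_to_cpu(*val);	/* compiles to a no-op on big-endian hosts */
}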