nvme fixes for 5.9

- fix an error during controller probe that caused double freeing of irqs (Keith Busch)
- FC connection establishment fix (James Smart)
- properly handle completions for invalid tags (Xianting Tian)
- pass the correct nsid to the command effects and supported log (Chaitanya Kulkarni)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl9sooALHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOk6RAAx8/Wad9B1/pTAl20StWil4w7Ck4SPsCQwAOlEjON
ldWTTAw4uUxUzP80qy6w0yOOyI1IJBjNtHPO+uOv99H7jxNNTdtbt5MCJCYKh7FH
Q+motRpNwS9mHLnJt9Yuz2aP7C84CPE8HyxJzSSpIkyA3JejZOlmxikSHByDahPS
jJIWtKXVij9VnGYLGB9zoiQ+HMqaX+5IcrhYJtfqkdmCA7VkuJsrQpzXwjwgbqu5
61H86a78Ogde3B7L3NLx56Wj9eJXJsYnR88OrJismYV54lMddzuTq3u5O2ac+3H9
tfMSoPEPODZpvZpmf33jMT5XeOXXlHhtdazk+2y0Fvmry5RMLRqJt6DCbksuy8x0
5JYwvb0BchmFgfgur7WMl6GbWOhD2NLYj9QvTsd6tkVMgQGOBg3I30uxW2fvzrHi
7FU0oSv9HaKqAgTXtXAOhJgRkz/V3vnlLQo9OH759E7vyyXI4FXsa+foQZjesHsq
hFkl6UEdY37AZSO0Qu00o6ZRV20be1oqyCQO4mNOmyU0iLZitOeS7MIDZ62qxSu0
VvOWRGjMSahcaPa97Oeg/ztmkQD4yY7e9Fk0YQ1rVDc+E3uhkFZ3FOi0mIPyjTd1
t/5b1tdYli2mQQtHr1EzVuyoNiH4Tf/2kbynUgDu03U7D2wsH3bQVyjJsLEEtSm6
AXM=
=0x/v
-----END PGP SIGNATURE-----

Merge tag 'nvme-5.9-2020-09-24' of git://git.infradead.org/nvme into block-5.9

Pull NVMe fixes from Christoph:

"nvme fixes for 5.9

 - fix an error during controller probe that caused double freeing of irqs (Keith Busch)
 - FC connection establishment fix (James Smart)
 - properly handle completions for invalid tags (Xianting Tian)
 - pass the correct nsid to the command effects and supported log (Chaitanya Kulkarni)"

* tag 'nvme-5.9-2020-09-24' of git://git.infradead.org/nvme:
  nvme-core: don't use NVME_NSID_ALL for command effects and supported log
  nvme-fc: fail new connections to a deleted host or remote port
  nvme-pci: fix NULL req in completion handler
  nvme: return errors for hwmon init
commit 9754d6cb63
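Before the hunks, a minimal, self-contained sketch of the probe-time pattern behind the "return errors for hwmon init" fix: a previously void sub-initialisation now returns an int, and its caller aborts the probe only on a negative value. All names here are hypothetical stand-ins, not the kernel's; in the diff below the real caller is nvme_init_identify() and the real callee is nvme_hwmon_init().

/* Hedged userspace sketch (hypothetical names): the caller guards the
 * optional sub-init behind a "first identify" flag and gives up on the
 * probe only when the sub-init reports a negative error. */
#include <stdio.h>
#include <errno.h>

static int init_optional_feature(int simulate_failure)
{
	if (simulate_failure)
		return -ENOMEM;		/* hard failure: caller should bail out */
	return 0;			/* success, or feature quietly skipped */
}

static int controller_probe(int already_identified, int simulate_failure)
{
	if (!already_identified) {
		int ret = init_optional_feature(simulate_failure);

		if (ret < 0)
			return ret;	/* propagate instead of ignoring it */
	}
	return 0;
}

int main(void)
{
	printf("probe ok:   %d\n", controller_probe(0, 0));
	printf("probe fail: %d\n", controller_probe(0, 1));
	return 0;
}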
@@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 	if (!cel)
 		return -ENOMEM;
 
-	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
 			&cel->log, sizeof(cel->log), 0);
 	if (ret) {
 		kfree(cel);
@@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	if (!ctrl->identified)
-		nvme_hwmon_init(ctrl);
+	if (!ctrl->identified) {
+		ret = nvme_hwmon_init(ctrl);
+		if (ret < 0)
+			return ret;
+	}
 
 	ctrl->identified = true;
 
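The two hunks above appear to come from drivers/nvme/host/core.c: the command effects log is now fetched with an explicit namespace ID of 0 rather than NVME_NSID_ALL (0xffffffff in the kernel headers), presumably because that log is controller-scoped rather than tied to a single namespace, and nvme_init_identify() now checks the result of nvme_hwmon_init(). A small sketch of the nsid distinction, using hypothetical types and constants rather than the kernel's:

/* Hypothetical sketch of choosing the namespace ID for a log-page read.
 * 0xffffffff stands in for the NVME_NSID_ALL wildcard; a controller-scoped
 * log such as the command effects log is requested with nsid 0 instead. */
#include <stdio.h>
#include <stdint.h>

#define NSID_ALL 0xffffffffu		/* hypothetical stand-in for NVME_NSID_ALL */

struct log_request {			/* hypothetical, not struct nvme_command */
	uint32_t nsid;
	uint8_t log_id;
};

static struct log_request make_log_request(uint8_t log_id, uint32_t nsid)
{
	struct log_request req = { .nsid = nsid, .log_id = log_id };
	return req;
}

int main(void)
{
	/* per-namespace health data: the wildcard can be appropriate */
	struct log_request smart = make_log_request(0x02, NSID_ALL);
	/* controller-scoped log: use nsid 0, as the hunk above now does */
	struct log_request effects = make_log_request(0x06, 0x00);

	printf("smart nsid=0x%x, effects nsid=0x%x\n",
	       (unsigned)smart.nsid, (unsigned)effects.nsid);
	return 0;
}

Note that the hwmon hunk further down keeps NVME_NSID_ALL for the SMART log, which is consistent with this split.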
@@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn)
+		    lport->localport.port_name != laddr.pn ||
+		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn)
+			    rport->remoteport.port_name != raddr.pn ||
+			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
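This hunk appears to come from drivers/nvme/host/fc.c: when a new controller is created, the local and remote port lookups now also require FC_OBJSTATE_ONLINE, so an address match against a host or remote port that has been deleted no longer counts. A userspace sketch of the same filter-on-identity-and-state idea, with hypothetical names:

/* Hedged sketch (hypothetical names): matching a port by address alone can
 * find an object that is being torn down; also requiring an ONLINE state,
 * as the fc.c hunk does, skips deleted ports. */
#include <stdio.h>

enum port_state { PORT_ONLINE, PORT_DELETED };

struct port {
	unsigned long long node_name;
	unsigned long long port_name;
	enum port_state state;
};

static const struct port *find_port(const struct port *ports, int n,
				    unsigned long long nn, unsigned long long pn)
{
	for (int i = 0; i < n; i++) {
		if (ports[i].node_name != nn ||
		    ports[i].port_name != pn ||
		    ports[i].state != PORT_ONLINE)
			continue;		/* wrong address, or not usable */
		return &ports[i];
	}
	return NULL;
}

int main(void)
{
	const struct port ports[] = {
		{ 0x10, 0x20, PORT_DELETED },	/* same address, being removed */
		{ 0x10, 0x20, PORT_ONLINE },	/* the one we actually want */
	};
	const struct port *p = find_port(ports, 2, 0x10, 0x20);

	printf("found %s port\n", p ? "an online" : "no");
	return 0;
}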
@@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
-	int ret;
-
-	ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
-
-	return ret <= 0 ? ret : -EIO;
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
@@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 	.info	= nvme_hwmon_info,
 };
 
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	struct device *dev = ctrl->dev;
 	struct nvme_hwmon_data *data;
@@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return;
+		return 0;
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(ctrl->device,
 			"Failed to read smart log (error %d)\n", err);
 		devm_kfree(dev, data);
-		return;
+		return err;
 	}
 
 	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
@@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		devm_kfree(dev, data);
 	}
+
+	return 0;
 }
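The five hunks above appear to come from drivers/nvme/host/hwmon.c: nvme_hwmon_get_smart_log() now returns the nvme_get_log() result directly, and nvme_hwmon_init() gains an int return with an asymmetric policy: a failed SMART log read is propagated, while an allocation or hwmon registration failure merely disables the feature and returns 0. A sketch of that policy, with hypothetical names:

/* Hypothetical sketch of the return policy seen above: only a failed device
 * read is reported to the caller; missing memory or a failed sensor
 * registration just leaves the feature disabled (return 0). */
#include <stdio.h>
#include <errno.h>

static int read_health_log(int simulate_error)
{
	return simulate_error ? -EIO : 0;
}

static int register_sensors(int simulate_error)
{
	return simulate_error ? -ENODEV : 0;
}

static int sensors_init(int alloc_fails, int read_fails, int register_fails)
{
	int err;

	if (alloc_fails)
		return 0;		/* no memory: quietly skip the feature */

	err = read_health_log(read_fails);
	if (err)
		return err;		/* the device misbehaved: report it */

	if (register_sensors(register_fails)) {
		fprintf(stderr, "failed to instantiate sensors\n");
		/* registration failure is non-fatal, fall through */
	}

	return 0;
}

int main(void)
{
	printf("ok:            %d\n", sensors_init(0, 0, 0));
	printf("read failed:   %d\n", sensors_init(0, 1, 0));
	printf("register fail: %d\n", sensors_init(0, 0, 1));
	return 0;
}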
@@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 
 #ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
 #else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+	return 0;
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
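The header hunk above mirrors the new return type in nvme.h: when CONFIG_NVME_HWMON is disabled, the inline stub now returns 0, so callers can check the return value without any #ifdef of their own. A generic sketch of that compile-time stub pattern, with hypothetical names:

/* Hypothetical sketch of the config-dependent stub pattern: when the feature
 * is compiled out, an inline stub that reports success lets the caller keep a
 * single, unconditional error check. */
#include <stdio.h>

/* #define HAVE_SENSORS 1	uncomment to simulate the feature being built in */

#ifdef HAVE_SENSORS
int sensors_init(void);			/* real implementation lives elsewhere */
#else
static inline int sensors_init(void)
{
	return 0;			/* feature absent: report success */
}
#endif

int main(void)
{
	int ret = sensors_init();	/* caller code is identical either way */

	if (ret < 0) {
		fprintf(stderr, "sensors_init failed: %d\n", ret);
		return 1;
	}
	printf("init ok\n");
	return 0;
}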
@@ -940,13 +940,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
-	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
-		dev_warn(nvmeq->dev->ctrl.device,
-			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
-		return;
-	}
-
 	/*
 	 * AEN requests are special as they don't time out and can
 	 * survive any kind of queue freeze and often don't respond to
@@ -960,6 +953,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	}
 
 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	if (unlikely(!req)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 		nvme_pci_complete_rq(req);
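The last two hunks appear to come from drivers/nvme/host/pci.c: nvme_handle_cqe() no longer bounds-checks the completion's command id against the queue depth up front; instead it looks the id up with blk_mq_tag_to_rq() and treats a NULL result as an invalid tag. A userspace sketch of that lookup-then-validate approach, with hypothetical names:

/* Hedged userspace sketch (hypothetical names, not kernel code): rather than
 * comparing a completion's id against a fixed queue depth, look the id up in
 * a tag table and treat a NULL result as an invalid completion. */
#include <stdio.h>
#include <stddef.h>

#define TAG_TABLE_SIZE 4

struct fake_request {
	int tag;
};

static struct fake_request *tag_table[TAG_TABLE_SIZE];

/* Returns NULL for ids that are out of range or not currently in flight. */
static struct fake_request *tag_to_request(unsigned int id)
{
	if (id >= TAG_TABLE_SIZE)
		return NULL;
	return tag_table[id];
}

static void handle_completion(unsigned int id)
{
	struct fake_request *req = tag_to_request(id);

	if (!req) {
		fprintf(stderr, "invalid id %u completed\n", id);
		return;
	}
	printf("completing request with tag %d\n", req->tag);
}

int main(void)
{
	struct fake_request r = { .tag = 2 };

	tag_table[2] = &r;
	handle_completion(2);	/* valid tag */
	handle_completion(7);	/* invalid: out of range */
	handle_completion(1);	/* invalid: nothing in flight for this id */
	return 0;
}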