SCSI misc on 20220524
This series consists of a small set of driver updates (lpfc, ufs, mpt3sas,
mpi3mr, iscsi target). Apart from that this is mostly small fixes with very
few core changes (the biggest one being VPD caching).

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYo2WnyYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishfEiAP4zvniL
xidsiCXGQ4pWF4QW3UxukXpGh5xFREhNCYT9+QEA+DyilCALOI+ZT5GKu2V6gkby
R29ve48/NAWl3fwYjMQ=
=GPL1
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This consists of a small set of driver updates (lpfc, ufs, mpt3sas,
  mpi3mr, iscsi target). Apart from that this is mostly small fixes with
  very few core changes (the biggest one being VPD caching)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (177 commits)
  scsi: target: tcmu: Avoid holding XArray lock when calling lock_page
  scsi: elx: efct: Remove NULL check after calling container_of()
  scsi: dpt_i2o: Drop redundant spinlock initialization
  scsi: qedf: Remove redundant variable op
  scsi: hisi_sas: Fix memory ordering in hisi_sas_task_deliver()
  scsi: fnic: Replace DMA mask of 64 bits with 47 bits
  scsi: mpi3mr: Add target device related sysfs attributes
  scsi: mpi3mr: Add shost related sysfs attributes
  scsi: elx: efct: Remove redundant memset() statement
  scsi: megaraid_sas: Remove redundant memset() statement
  scsi: mpi3mr: Return error if dma_alloc_coherent() fails
  scsi: hisi_sas: Fix rescan after deleting a disk
  scsi: hisi_sas: Use sas_ata_wait_after_reset() in IT nexus reset
  scsi: libsas: Refactor sas_ata_hard_reset()
  scsi: mpt3sas: Update driver version to 42.100.00.00
  scsi: mpt3sas: Fix junk chars displayed while printing ChipName
  scsi: ipr: Use kobj_to_dev()
  scsi: mpi3mr: Fix a NULL vs IS_ERR() bug in mpi3mr_bsg_init()
  scsi: bnx2fc: Avoid using get_cpu() in bnx2fc_cmd_alloc()
  scsi: libfc: Remove get_cpu() semantics in fc_exch_em_alloc()
  ...
commit fbe86daca0
@@ -1518,7 +1518,7 @@ Description:	This entry shows the number of reads that cannot be changed to
 
 		The file is read only.
 
-What:		/sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt
+What:		/sys/class/scsi_device/*/device/hpb_stats/rcmd_noti_cnt
 Date:		June 2021
 Contact:	Daejun Park <daejun7.park@samsung.com>
 Description:	This entry shows the number of response UPIUs that has
@@ -1526,19 +1526,23 @@ Description:	This entry shows the number of response UPIUs that has
 
 		The file is read only.
 
-What:		/sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt
+What:		/sys/class/scsi_device/*/device/hpb_stats/rcmd_active_cnt
 Date:		June 2021
 Contact:	Daejun Park <daejun7.park@samsung.com>
-Description:	This entry shows the number of active sub-regions recommended by
-		response UPIUs.
+Description:	For the HPB device control mode, this entry shows the number of
+		active sub-regions recommended by response UPIUs. For the HPB host control
+		mode, this entry shows the number of active sub-regions recommended by the
+		HPB host control mode heuristic algorithm.
 
 		The file is read only.
 
-What:		/sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt
+What:		/sys/class/scsi_device/*/device/hpb_stats/rcmd_inactive_cnt
 Date:		June 2021
 Contact:	Daejun Park <daejun7.park@samsung.com>
-Description:	This entry shows the number of inactive regions recommended by
-		response UPIUs.
+Description:	For the HPB device control mode, this entry shows the number of
+		inactive regions recommended by response UPIUs. For the HPB host control
+		mode, this entry shows the number of inactive regions recommended by the
+		HPB host control mode heuristic algorithm.
 
 		The file is read only.
@@ -46,7 +46,7 @@ static struct workqueue_struct *isert_comp_wq;
 static struct workqueue_struct *isert_release_wq;
 
 static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
 static int
 isert_login_post_recv(struct isert_conn *isert_conn);
 static int
@@ -909,7 +909,7 @@ isert_login_post_recv(struct isert_conn *isert_conn)
 }
 
 static int
-isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
 		   u32 length)
 {
 	struct isert_conn *isert_conn = conn->context;
@@ -976,7 +976,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 {
 	struct iser_rx_desc *rx_desc = isert_conn->login_desc;
 	int rx_buflen = isert_conn->login_req_len;
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
 	int size;
 
@@ -1020,21 +1020,21 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 	schedule_delayed_work(&conn->login_work, 0);
 }
 
-static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
+static struct iscsit_cmd
+*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
 {
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_cmd *isert_cmd;
-	struct iscsi_cmd *cmd;
+	struct iscsit_cmd *cmd;
 
 	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
-		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+		isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
 		return NULL;
 	}
 	isert_cmd = iscsit_priv_cmd(cmd);
 	isert_cmd->conn = isert_conn;
-	isert_cmd->iscsi_cmd = cmd;
+	isert_cmd->iscsit_cmd = cmd;
 	isert_cmd->rx_desc = rx_desc;
 
 	return cmd;
@@ -1042,10 +1042,10 @@ static struct iscsi_cmd
 
 static int
 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
-		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
+		      struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
 		      struct iser_rx_desc *rx_desc, unsigned char *buf)
 {
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
 	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
 	bool dump_payload = false;
@@ -1114,8 +1114,8 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
 			   struct iser_rx_desc *rx_desc, unsigned char *buf)
 {
 	struct scatterlist *sg_start;
-	struct iscsi_conn *conn = isert_conn->conn;
-	struct iscsi_cmd *cmd = NULL;
+	struct iscsit_conn *conn = isert_conn->conn;
+	struct iscsit_cmd *cmd = NULL;
 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
 	u32 unsol_data_len = ntoh24(hdr->dlength);
 	int rc, sg_nents, sg_off, page_off;
@@ -1171,10 +1171,10 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
 
 static int
 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+		     struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
 		     unsigned char *buf)
 {
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
 	int rc;
 
@@ -1190,10 +1190,10 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 
 static int
 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+		      struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
 		      struct iscsi_text *hdr)
 {
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 	u32 payload_length = ntoh24(hdr->dlength);
 	int rc;
 	unsigned char *text_in = NULL;
@@ -1220,8 +1220,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 		uint32_t write_stag, uint64_t write_va)
 {
 	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
-	struct iscsi_conn *conn = isert_conn->conn;
-	struct iscsi_cmd *cmd;
+	struct iscsit_conn *conn = isert_conn->conn;
+	struct iscsit_cmd *cmd;
 	struct isert_cmd *isert_cmd;
 	int ret = -EINVAL;
 	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
@@ -1404,7 +1404,7 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 static void
 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
 {
-	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+	struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
 	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
 
 	if (!cmd->rw.nr_ops)
@@ -1426,9 +1426,9 @@ isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
 static void
 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 	struct iscsi_text_rsp *hdr;
 
 	isert_dbg("Cmd %p\n", isert_cmd);
@@ -1575,7 +1575,7 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct isert_device *device = isert_conn->device;
 	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
 	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
-	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
+	struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
 	int ret = 0;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -1604,7 +1604,7 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	/*
 	 * XXX: isert_put_response() failure is not retried.
 	 */
-	ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+	ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
 	if (ret)
 		pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
 }
@@ -1617,7 +1617,7 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct isert_device *device = isert_conn->device;
 	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
 	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	int ret = 0;
 
@@ -1662,7 +1662,7 @@ isert_do_control_comp(struct work_struct *work)
 			struct isert_cmd, comp_work);
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
 
 	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
 
@@ -1720,7 +1720,7 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	switch (isert_cmd->iscsi_cmd->i_state) {
+	switch (isert_cmd->iscsit_cmd->i_state) {
 	case ISTATE_SEND_TASKMGTRSP:
 	case ISTATE_SEND_LOGOUTRSP:
 	case ISTATE_SEND_REJECT:
@@ -1731,7 +1731,7 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	default:
-		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
+		isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
 		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 		break;
 	}
@@ -1755,7 +1755,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
 }
 
 static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -1806,7 +1806,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static void
-isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -1822,7 +1822,7 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static enum target_prot_op
-isert_get_sup_prot_ops(struct iscsi_conn *conn)
+isert_get_sup_prot_ops(struct iscsit_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_device *device = isert_conn->device;
@@ -1842,7 +1842,7 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
 }
 
 static int
-isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
 		bool nopout_response)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -1862,7 +1862,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 }
 
 static int
-isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -1880,7 +1880,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -1898,7 +1898,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -1933,7 +1933,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
@@ -2035,7 +2035,7 @@ static int
 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
 		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
-	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+	struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
 	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
 	u8 port_num = conn->cm_id->port_num;
 	u64 addr;
@@ -2048,7 +2048,7 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
 	if (dir == DMA_FROM_DEVICE) {
 		addr = cmd->write_va;
 		rkey = cmd->write_stag;
-		offset = cmd->iscsi_cmd->write_data_done;
+		offset = cmd->iscsit_cmd->write_data_done;
 	} else {
 		addr = cmd->read_va;
 		rkey = cmd->read_stag;
@@ -2088,7 +2088,7 @@ rdma_ctx_post:
 }
 
 static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2129,7 +2129,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static int
-isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	int ret;
@@ -2147,7 +2147,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 }
 
 static int
-isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	int ret = 0;
@@ -2172,7 +2172,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 }
 
 static int
-isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
 	struct isert_conn *isert_conn = conn->context;
 	int ret;
@@ -2332,7 +2332,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
 }
 
 static int
-isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
 {
 	struct isert_conn *isert_conn = conn->context;
 	int ret;
@@ -2368,7 +2368,7 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 }
 
 static void
-isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
 		    struct isert_conn *isert_conn)
 {
 	struct rdma_cm_id *cm_id = isert_conn->cm_id;
@@ -2381,7 +2381,7 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
 }
 
 static int
-isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
 {
 	struct isert_np *isert_np = np->np_context;
 	struct isert_conn *isert_conn;
@@ -2489,7 +2489,7 @@ static void isert_release_work(struct work_struct *work)
 static void
 isert_wait4logout(struct isert_conn *isert_conn)
 {
-	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsit_conn *conn = isert_conn->conn;
 
 	isert_info("conn %p\n", isert_conn);
 
@@ -2501,9 +2501,9 @@ isert_wait4logout(struct isert_conn *isert_conn)
 }
 
 static void
-isert_wait4cmds(struct iscsi_conn *conn)
+isert_wait4cmds(struct iscsit_conn *conn)
 {
-	isert_info("iscsi_conn %p\n", conn);
+	isert_info("iscsit_conn %p\n", conn);
 
 	if (conn->sess) {
 		target_stop_session(conn->sess->se_sess);
@@ -2521,9 +2521,9 @@ isert_wait4cmds(struct iscsi_conn *conn)
  * before blocking on the target_wait_for_session_cmds
 */
 static void
-isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
 {
-	struct iscsi_cmd *cmd, *tmp;
+	struct iscsit_cmd *cmd, *tmp;
 	static LIST_HEAD(drop_cmd_list);
 
 	spin_lock_bh(&conn->cmd_lock);
@@ -2546,7 +2546,7 @@ isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
 	}
 }
 
-static void isert_wait_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsit_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 
@@ -2564,7 +2564,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_free_conn(struct iscsit_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 
@@ -2572,7 +2572,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
 	isert_put_conn(isert_conn);
 }
 
-static void isert_get_rx_pdu(struct iscsi_conn *conn)
+static void isert_get_rx_pdu(struct iscsit_conn *conn)
 {
 	struct completion comp;
 
@@ -146,7 +146,7 @@ struct isert_cmd {
 	u64			pdu_buf_dma;
 	u32			pdu_buf_len;
 	struct isert_conn	*conn;
-	struct iscsi_cmd	*iscsi_cmd;
+	struct iscsit_cmd	*iscsit_cmd;
 	struct iser_tx_desc	tx_desc;
 	struct iser_rx_desc	*rx_desc;
 	struct rdma_rw_ctx	rw;
@@ -173,7 +173,7 @@ struct isert_conn {
 	u64			login_rsp_dma;
 	struct iser_rx_desc	*rx_descs;
 	struct ib_recv_wr	rx_wr[ISERT_QP_MAX_RECV_DTOS];
-	struct iscsi_conn	*conn;
+	struct iscsit_conn	*conn;
 	struct list_head	node;
 	struct completion	login_comp;
 	struct completion	login_req_comp;
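The rename above relies on the convention that the LIO core allocates the driver's per-command area directly behind the iscsit_cmd it hands out, so iser-t can hop between the two without any lookup table. A minimal sketch of that embedding pattern, assuming the usual "private data follows the command" layout (illustrative only, not the driver's actual definitions):

	/* The core allocates one block: [struct iscsit_cmd][driver priv]. */
	static inline void *example_priv_cmd(struct iscsit_cmd *cmd)
	{
		return (void *)(cmd + 1);	/* driver data sits right after the command */
	}

The reverse direction is the pointer stored in the wrapper (isert_cmd->iscsit_cmd in the hunks above), so each side can reach the other in O(1).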
@@ -2334,7 +2334,6 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 	ToolboxIstwiReadWriteRequest_t	*IstwiRWRequest;
 	MPT_FRAME_HDR			*mf = NULL;
 	unsigned long			timeleft;
-	int				retval;
 	u32				msgcontext;
 
 	/* Reset long to int. Should affect IA64 and SPARC only
@@ -2488,7 +2487,6 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 	ioc->add_sge((char *)&IstwiRWRequest->SGL,
 	    (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
 
-	retval = 0;
 	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
 				IstwiRWRequest->MsgContext);
 	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
@@ -2498,7 +2496,6 @@ retry_wait:
 	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
 			HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
 	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
-		retval = -ETIME;
 		printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
 		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
 			mpt_free_msg_frame(ioc, mf);
@@ -121,7 +121,7 @@ enum {
 #define	SA_AIF_PDEV_CHANGE		(1<<4)
 #define	SA_AIF_LDEV_CHANGE		(1<<5)
 #define	SA_AIF_BPSTAT_CHANGE		(1<<30)
-#define	SA_AIF_BPCFG_CHANGE		(1<<31)
+#define	SA_AIF_BPCFG_CHANGE		(1U<<31)
 
 #define	HBA_MAX_SG_EMBEDDED		28
 #define	HBA_MAX_SG_SEPARATE		90
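The one-character aacraid fix matters because with a 32-bit int, 1<<31 shifts a set bit into the sign position, which is undefined behaviour in C; bit 31 has to be built from an unsigned constant. A compact illustration with hypothetical flag names (not the driver's):

	#include <stdint.h>

	#define EX_BIT30	(1 << 30)	/* fine: stays in the value bits of int */
	#define EX_BIT31	(1U << 31)	/* unsigned: well-defined 0x80000000 */
	/* (1 << 31) would overflow a signed int: undefined behaviour. */

	static inline int ex_bit31_set(uint32_t flags)
	{
		return !!(flags & EX_BIT31);
	}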
@@ -302,7 +302,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 			if (flag & SCRD)
 				printk("SCRD ");
 			printk("status %02x\n", inb(STATUS(sh->io_port)));
-		};
+		}
 	}
 #endif
 	number_serviced = 0;
@@ -344,7 +344,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 			if (!number_serviced)
 				shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
 			return IRQ_HANDLED;
-		};
+		}
 
 		mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
 		mbistatus = mb[mbi].status;
@@ -408,7 +408,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 			 */
 			scsi_done(tmp_cmd);
 			number_serviced++;
-		};
+		}
 	}
 }
 
 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
@@ -534,7 +534,7 @@ static void setup_mailboxes(struct Scsi_Host *sh)
 		any2scsi(aha1542->mb[i].ccbptr,
 			 aha1542->ccb_handle + i * sizeof(struct ccb));
 		aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
-	};
+	}
 	aha1542_intr_reset(sh->io_port);	/* reset interrupts, so they don't block */
 	any2scsi(mb_cmd + 2, aha1542->mb_handle);
 	if (aha1542_out(sh->io_port, mb_cmd, 5))
@@ -549,7 +549,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
 	i = inb(STATUS(sh->io_port));
 	if (i & DF) {
 		i = inb(DATA(sh->io_port));
-	};
+	}
 	aha1542_outb(sh->io_port, CMD_RETCONF);
 	aha1542_in(sh->io_port, inquiry_result, 3, 0);
 	if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -578,7 +578,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
 	default:
 		shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
 		return -1;
-	};
+	}
 	switch (inquiry_result[1]) {
 	case 0x40:
 		sh->irq = 15;
@@ -601,7 +601,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
 	default:
 		shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
 		return -1;
-	};
+	}
 	sh->this_id = inquiry_result[2] & 7;
 	return 0;
 }
@@ -636,7 +636,7 @@ static int aha1542_mbenable(struct Scsi_Host *sh)
 
 		if (aha1542_out(sh->io_port, mbenable_cmd, 3))
 			goto fail;
-	};
+	}
 	while (0) {
 fail:
 		shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
@@ -654,7 +654,7 @@ static int aha1542_query(struct Scsi_Host *sh)
 	i = inb(STATUS(sh->io_port));
 	if (i & DF) {
 		i = inb(DATA(sh->io_port));
-	};
+	}
 	aha1542_outb(sh->io_port, CMD_INQUIRY);
 	aha1542_in(sh->io_port, inquiry_result, 4, 0);
 	if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -673,7 +673,7 @@ static int aha1542_query(struct Scsi_Host *sh)
 	if (inquiry_result[0] == 0x43) {
 		shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
 		return 1;
-	};
+	}
 
 	/*
 	 * Always call this - boards that do not support extended bios translation
@ -371,7 +371,6 @@ bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
|
||||
if (!fw_debug)
|
||||
return 0;
|
||||
|
||||
if (fw_debug->debug_buffer)
|
||||
vfree(fw_debug->debug_buffer);
|
||||
|
||||
file->private_data = NULL;
|
||||
|
@@ -755,7 +755,6 @@ void
 bfad_destroy_workq(struct bfad_im_s *im)
 {
 	if (im && im->drv_workq) {
-		flush_workqueue(im->drv_workq);
 		destroy_workqueue(im->drv_workq);
 		im->drv_workq = NULL;
 	}
@@ -273,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	struct fcoe_port	*port;
 	struct fcoe_hdr		*hp;
 	struct bnx2fc_rport	*tgt;
-	struct fc_stats	*stats;
 	u8			sof, eof;
 	u32			crc;
 	unsigned int		hlen, tlen, elen;
@@ -399,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	}
 
 	/*update tx stats */
-	stats = per_cpu_ptr(lport->stats, get_cpu());
-	stats->TxFrames++;
-	stats->TxWords += wlen;
-	put_cpu();
+	this_cpu_inc(lport->stats->TxFrames);
+	this_cpu_add(lport->stats->TxWords, wlen);
 
 	/* send down to lld */
 	fr_dev(fp) = lport;
@@ -512,7 +509,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	u32 fr_len, fr_crc;
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
-	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
@@ -543,10 +539,8 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct fcoe_hdr));
 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
-	stats->RxFrames++;
-	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-	put_cpu();
+	this_cpu_inc(lport->stats->RxFrames);
+	this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
 
 	fp = (struct fc_frame *)skb;
 	fc_frame_init(fp);
@@ -633,9 +627,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	fr_crc = le32_to_cpu(fr_crc(fp));
 
 	if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
-		stats = per_cpu_ptr(lport->stats, get_cpu());
-		crc_err = (stats->InvalidCRCCount++);
-		put_cpu();
+		crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount);
 		if (crc_err < 5)
 			printk(KERN_WARNING PFX "dropping frame with "
 			       "CRC error\n");
@@ -964,9 +956,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 				mutex_unlock(&lport->lp_mutex);
 				fc_host_port_type(lport->host) =
 					FC_PORTTYPE_UNKNOWN;
-				per_cpu_ptr(lport->stats,
-					    get_cpu())->LinkFailureCount++;
-				put_cpu();
+				this_cpu_inc(lport->stats->LinkFailureCount);
 				fcoe_clean_pending_queue(lport);
 				wait_for_upload = 1;
 			}
@@ -472,7 +472,7 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	u32 free_sqes;
 	u32 max_sqes;
 	u16 xid;
-	int index = get_cpu();
+	int index = raw_smp_processor_id();
 
 	max_sqes = BNX2FC_SCSI_MAX_SQES;
 	/*
@@ -485,7 +485,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	    (tgt->num_active_ios.counter >= max_sqes) ||
 	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
 		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
-		put_cpu();
 		return NULL;
 	}
 
@@ -498,7 +497,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	atomic_inc(&tgt->num_active_ios);
 	atomic_dec(&tgt->free_sqes);
 	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
-	put_cpu();
 
 	INIT_LIST_HEAD(&io_req->link);
 
@@ -2032,7 +2030,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 	struct bnx2fc_interface *interface = port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fc_lport *lport = port->lport;
-	struct fc_stats *stats;
 	int task_idx, index;
 	u16 xid;
 
@@ -2045,20 +2042,18 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
 	bnx2fc_priv(sc_cmd)->io_req = io_req;
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		io_req->io_req_flags = BNX2FC_READ;
-		stats->InputRequests++;
-		stats->InputBytes += io_req->data_xfer_len;
+		this_cpu_inc(lport->stats->InputRequests);
+		this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		io_req->io_req_flags = BNX2FC_WRITE;
-		stats->OutputRequests++;
-		stats->OutputBytes += io_req->data_xfer_len;
+		this_cpu_inc(lport->stats->OutputRequests);
+		this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
 	} else {
 		io_req->io_req_flags = 0;
-		stats->ControlRequests++;
+		this_cpu_inc(lport->stats->ControlRequests);
 	}
-	put_cpu();
 
 	xid = io_req->xid;
 
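The bnx2fc hunks above (and the fcoe/libfc ones below) replace the get_cpu()/per_cpu_ptr()/put_cpu() triple with this_cpu_*() operations, which fold the per-CPU address calculation and the update into a single preemption-safe step, so no explicit preempt-disable window is needed. A minimal sketch of the two styles, using a hypothetical counter struct rather than the driver's:

	#include <linux/percpu.h>

	struct ex_stats {
		u64 frames;
		u64 words;
	};
	static DEFINE_PER_CPU(struct ex_stats, ex_stats);

	static void count_frame_old(u32 wlen)
	{
		/* get_cpu() disables preemption until put_cpu() */
		struct ex_stats *s = per_cpu_ptr(&ex_stats, get_cpu());

		s->frames++;
		s->words += wlen;
		put_cpu();
	}

	static void count_frame_new(u32 wlen)
	{
		this_cpu_inc(ex_stats.frames);		/* one preemption-safe op */
		this_cpu_add(ex_stats.words, wlen);
	}

this_cpu_inc_return() is the variant used where the updated value feeds a rate limit (the InvalidCRCCount checks above).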
@@ -3585,10 +3585,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
 #endif
 	if (dcb->target_lun != 0) {
 		/* Copy settings */
-		struct DeviceCtlBlk *p;
-		list_for_each_entry(p, &acb->dcb_list, list)
-			if (p->target_id == dcb->target_id)
+		struct DeviceCtlBlk *p = NULL, *iter;
+
+		list_for_each_entry(iter, &acb->dcb_list, list)
+			if (iter->target_id == dcb->target_id) {
+				p = iter;
 				break;
+			}
+
+		if (!p) {
+			kfree(dcb);
+			return NULL;
+		}
+
 		dprintkdbg(DBG_1,
 		       "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
 		       dcb->target_id, dcb->target_lun,
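The dc395x hunk follows the tree-wide rule that the list_for_each_entry() cursor must not be dereferenced after the loop: when the list is exhausted, the cursor points at a bogus entry computed from the list head itself. The fix pattern is a dedicated iterator plus a NULL-initialized result that is only published on a real match. Sketch with a hypothetical node struct:

	#include <linux/list.h>

	struct ex_node {
		struct list_head list;
		int id;
	};

	static struct ex_node *ex_find(struct list_head *head, int id)
	{
		struct ex_node *found = NULL, *iter;

		list_for_each_entry(iter, head, list)
			if (iter->id == id) {
				found = iter;	/* publish only on a match */
				break;
			}

		return found;			/* NULL if the list ran out */
	}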
@@ -1000,7 +1000,6 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
-	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
 		printk(KERN_INFO "Adaptec I2O RAID controller"
@@ -1402,7 +1402,6 @@ efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
 		mutex_lock(&hw->bmbx_lock);
 		bmbx = hw->sli.bmbx.virt;
 
-		memset(bmbx, 0, SLI4_BMBX_SIZE);
 		memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
 
 		if (sli_bmbx_command(&hw->sli) == 0) {
@@ -62,7 +62,6 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
 			return NULL;
 		}
 
-		memset(io->sgl, 0, sizeof(*io->sgl) * num_sgl);
 		io->sgl_allocated = num_sgl;
 		io->sgl_count = 0;
 
@@ -370,9 +370,6 @@ static int efct_lio_get_cmd_state(struct se_cmd *cmd)
 		container_of(cmd, struct efct_scsi_tgt_io, cmd);
 	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
 
-	if (!io)
-		return 0;
-
 	return io->tgt_io.state;
 }
 
@@ -1434,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
 	return NET_RX_SUCCESS;
 err:
-	per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
-	put_cpu();
+	this_cpu_inc(lport->stats->ErrorFrames);
 err2:
 	kfree_skb(skb);
 	return NET_RX_DROP;
@@ -1453,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
 	struct fcoe_percpu_s *fps;
 	int rc;
 
-	fps = &get_cpu_var(fcoe_percpu);
+	local_lock(&fcoe_percpu.lock);
+	fps = this_cpu_ptr(&fcoe_percpu);
 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-	put_cpu_var(fcoe_percpu);
+	local_unlock(&fcoe_percpu.lock);
 
 	return rc;
 }
@@ -1474,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	struct ethhdr *eh;
 	struct fcoe_crc_eof *cp;
 	struct sk_buff *skb;
-	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	unsigned int hlen;		/* header length implies the version */
 	unsigned int tlen;		/* trailer length */
@@ -1585,10 +1584,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 		skb_shinfo(skb)->gso_size = 0;
 	}
 	/* update tx stats: regardless if LLD fails */
-	stats = per_cpu_ptr(lport->stats, get_cpu());
-	stats->TxFrames++;
-	stats->TxWords += wlen;
-	put_cpu();
+	this_cpu_inc(lport->stats->TxFrames);
+	this_cpu_add(lport->stats->TxWords, wlen);
 
 	/* send down to lld */
 	fr_dev(fp) = lport;
@@ -1610,7 +1607,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct sk_buff *skb = (struct sk_buff *)fp;
-	struct fc_stats *stats;
 
 	/*
 	 * We only check CRC if no offload is available and if it is
@@ -1640,11 +1636,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 		return 0;
 	}
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
-	stats->InvalidCRCCount++;
-	if (stats->InvalidCRCCount < 5)
+	if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-	put_cpu();
 	return -EINVAL;
 }
 
@@ -1657,7 +1650,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	u32 fr_len;
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
-	struct fc_stats *stats;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
 	struct fcoe_hdr *hp;
@@ -1685,9 +1677,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	 */
 	hp = (struct fcoe_hdr *) skb_network_header(skb);
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
-		if (stats->ErrorFrames < 5)
+		struct fc_stats *stats;
+
+		stats = per_cpu_ptr(lport->stats, raw_smp_processor_id());
+		if (READ_ONCE(stats->ErrorFrames) < 5)
 			printk(KERN_WARNING "fcoe: FCoE version "
 			       "mismatch: The frame has "
 			       "version %x, but the "
@@ -1700,8 +1694,8 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct fcoe_hdr));
 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-	stats->RxFrames++;
-	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+	this_cpu_inc(lport->stats->RxFrames);
+	this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
 
 	fp = (struct fc_frame *)skb;
 	fc_frame_init(fp);
@@ -1717,13 +1711,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 		goto drop;
 
 	if (!fcoe_filter_frames(lport, fp)) {
-		put_cpu();
 		fc_exch_recv(lport, fp);
 		return;
 	}
 drop:
-	stats->ErrorFrames++;
-	put_cpu();
+	this_cpu_inc(lport->stats->ErrorFrames);
 	kfree_skb(skb);
 }
 
@@ -1847,7 +1839,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
-	struct fc_stats *stats;
 	u32 link_possible = 1;
 	u32 mfs;
 	int rc = NOTIFY_OK;
@@ -1921,9 +1912,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		break;
 	case FCOE_CTLR_ENABLED:
 	case FCOE_CTLR_UNUSED:
-		stats = per_cpu_ptr(lport->stats, get_cpu());
-		stats->LinkFailureCount++;
-		put_cpu();
+		this_cpu_inc(lport->stats->LinkFailureCount);
 		fcoe_clean_pending_queue(lport);
 	}
 }
@@ -2488,6 +2477,7 @@ static int __init fcoe_init(void)
 		p = per_cpu_ptr(&fcoe_percpu, cpu);
 		INIT_WORK(&p->work, fcoe_receive_work);
 		skb_queue_head_init(&p->fcoe_rx_list);
+		local_lock_init(&p->lock);
 	}
 
 	/* Setup link change notification */
@@ -2580,7 +2570,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	/* pre-FIP */
 	if (is_zero_ether_addr(mac))
 		fcoe_ctlr_recv_flogi(fip, lport, fp);
-	if (!is_zero_ether_addr(mac))
+	else
 		fcoe_update_src_mac(lport, mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
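fcoe_alloc_paged_crc_eof() above switches from get_cpu_var() to a local_lock_t embedded in the per-CPU struct: on a non-RT kernel local_lock() still just disables preemption, but on PREEMPT_RT it becomes a real per-CPU lock, so the critical section stays correct when it can sleep. A sketch of the pattern with a hypothetical per-CPU struct (the driver initializes the lock at module init with local_lock_init(), as in the hunk; a static initializer works too):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct ex_percpu {
		local_lock_t lock;
		int scratch;
	};
	static DEFINE_PER_CPU(struct ex_percpu, ex_percpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void ex_use(void)
	{
		struct ex_percpu *p;

		local_lock(&ex_percpu.lock);	/* preempt-off on !RT, per-CPU lock on RT */
		p = this_cpu_ptr(&ex_percpu);
		p->scratch++;
		local_unlock(&ex_percpu.lock);
	}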
@@ -824,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 	unsigned long deadline;
 	unsigned long sel_time = 0;
 	struct list_head del_list;
-	struct fc_stats *stats;
 
 	INIT_LIST_HEAD(&del_list);
 
-	stats = per_cpu_ptr(fip->lp->stats, get_cpu());
-
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
 		if (fip->sel_fcf == fcf) {
 			if (time_after(jiffies, deadline)) {
-				stats->MissDiscAdvCount++;
+				u64 miss_cnt;
+
+				miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount);
 				printk(KERN_INFO "libfcoe: host%d: "
 				       "Missing Discovery Advertisement "
 				       "for fab %16.16llx count %lld\n",
 				       fip->lp->host->host_no, fcf->fabric_name,
-				       stats->MissDiscAdvCount);
+				       miss_cnt);
 			} else if (time_after(next_timer, deadline))
 				next_timer = deadline;
 		}
@@ -855,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 			 */
 			list_del(&fcf->list);
 			list_add(&fcf->list, &del_list);
-			stats->VLinkFailureCount++;
+			this_cpu_inc(fip->lp->stats->VLinkFailureCount);
 		} else {
 			if (time_after(next_timer, deadline))
 				next_timer = deadline;
@@ -864,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 			sel_time = fcf->time;
 		}
 	}
-	put_cpu();
 
 	list_for_each_entry_safe(fcf, next, &del_list, list) {
 		/* Removes fcf from current list */
@@ -1142,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	struct fip_desc *desc;
 	struct fip_encaps *els;
 	struct fcoe_fcf *sel;
-	struct fc_stats *stats;
 	enum fip_desc_type els_dtype = 0;
 	u8 els_op;
 	u8 sub;
@@ -1286,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	fr_dev(fp) = lport;
 	fr_encaps(fp) = els_dtype;
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
-	stats->RxFrames++;
-	stats->RxWords += skb->len / FIP_BPW;
-	put_cpu();
+	this_cpu_inc(lport->stats->RxFrames);
+	this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW);
 
 	fc_exch_recv(lport, fp);
 	return;
@@ -1427,9 +1422,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 			      ntoh24(vp->fd_fc_id));
 		if (vn_port && (vn_port == lport)) {
 			mutex_lock(&fip->ctlr_mutex);
-			per_cpu_ptr(lport->stats,
-				    get_cpu())->VLinkFailureCount++;
-			put_cpu();
+			this_cpu_inc(lport->stats->VLinkFailureCount);
 			fcoe_ctlr_reset(fip);
 			mutex_unlock(&fip->ctlr_mutex);
 		}
@@ -1457,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 		 * followed by physical port
 		 */
 		mutex_lock(&fip->ctlr_mutex);
-		per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
-		put_cpu();
+		this_cpu_inc(lport->stats->VLinkFailureCount);
 		fcoe_ctlr_reset(fip);
 		mutex_unlock(&fip->ctlr_mutex);
 
@@ -183,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lport,
 	memset(lesb, 0, sizeof(*lesb));
 	for_each_possible_cpu(cpu) {
 		stats = per_cpu_ptr(lport->stats, cpu);
-		lfc += stats->LinkFailureCount;
-		vlfc += stats->VLinkFailureCount;
-		mdac += stats->MissDiscAdvCount;
+		lfc += READ_ONCE(stats->LinkFailureCount);
+		vlfc += READ_ONCE(stats->VLinkFailureCount);
+		mdac += READ_ONCE(stats->MissDiscAdvCount);
 	}
 	lesb->lesb_link_fail = htonl(lfc);
 	lesb->lesb_vlink_fail = htonl(vlfc);
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.53"
+#define DRV_VERSION		"1.6.0.54"
 #define PFX			DRV_NAME ": "
 #define DFX			DRV_NAME "%d: "
 
@@ -86,7 +86,6 @@ void fnic_debugfs_terminate(void)
 	debugfs_remove(fnic_trace_debugfs_root);
 	fnic_trace_debugfs_root = NULL;
 
-	if (fc_trc_flag)
-		vfree(fc_trc_flag);
+	vfree(fc_trc_flag);
 }
 
@@ -612,10 +612,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device.  Try 64-bit first, and
-	 * fail to 32-bit.
+	 * limitation for the device. Try 47-bit first, and
+	 * fail to 32-bit. Cisco VIC supports 47 bits only.
 	 */
-	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
 	if (err) {
 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
@@ -1146,10 +1146,8 @@ static void __exit fnic_cleanup_module(void)
 {
 	pci_unregister_driver(&fnic_driver);
 	destroy_workqueue(fnic_event_queue);
-	if (fnic_fip_queue) {
-		flush_workqueue(fnic_fip_queue);
+	if (fnic_fip_queue)
 		destroy_workqueue(fnic_fip_queue);
-	}
 	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
 	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 	kmem_cache_destroy(fnic_io_req_cache);
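fnic now asks for a 47-bit DMA mask first because the Cisco VIC hardware cannot address more than 47 bits, then falls back to 32-bit. The general probe-time pattern is widest-supported-mask first, lowest common denominator second; a hedged sketch with a hypothetical device:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int ex_set_dma_mask(struct pci_dev *pdev)
	{
		int err;

		/* Ask for the widest mask the hardware actually supports... */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
		if (err)
			/* ...and fall back to 32-bit addressing. */
			err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		return err;
	}

Requesting 64 bits on this hardware could hand the device buffers it cannot reach, which is why the mask was lowered rather than raised.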
@@ -446,6 +446,8 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
 		return;
 	}
 
+	/* Make slot memories observable before marking as ready */
+	smp_wmb();
 	WRITE_ONCE(slot->ready, 1);
 
 	spin_lock(&dq->lock);
@@ -709,8 +711,6 @@ static int hisi_sas_init_device(struct domain_device *device)
 	struct scsi_lun lun;
 	int retry = HISI_SAS_DISK_RECOVER_CNT;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
-	struct device *dev = hisi_hba->dev;
-	struct sas_phy *local_phy;
 
 	switch (device->dev_type) {
 	case SAS_END_DEVICE:
@@ -729,30 +729,18 @@ static int hisi_sas_init_device(struct domain_device *device)
 	case SAS_SATA_PM_PORT:
 	case SAS_SATA_PENDING:
 		/*
-		 * send HARD RESET to clear previous affiliation of
-		 * STP target port
+		 * If an expander is swapped when a SATA disk is attached then
+		 * we should issue a hard reset to clear previous affiliation
+		 * of STP target port, see SPL (chapter 6.19.4).
+		 *
+		 * However we don't need to issue a hard reset here for these
+		 * reasons:
+		 * a. When probing the device, libsas/libata already issues a
+		 * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
+		 * to issue a hard reset by checking the dev status (== INIT).
+		 * b. When resetting the controller, this is simply unnecessary.
 		 */
-		local_phy = sas_get_local_phy(device);
-		if (!scsi_is_sas_phy_local(local_phy) &&
-		    !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
-			unsigned long deadline = ata_deadline(jiffies, 20000);
-			struct sata_device *sata_dev = &device->sata_dev;
-			struct ata_host *ata_host = sata_dev->ata_host;
-			struct ata_port_operations *ops = ata_host->ops;
-			struct ata_port *ap = sata_dev->ap;
-			struct ata_link *link;
-			unsigned int classes;
-
-			ata_for_each_link(link, ap, EDGE)
-				rc = ops->hardreset(link, &classes,
-						    deadline);
-		}
-		sas_put_local_phy(local_phy);
-		if (rc) {
-			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
-			return rc;
-		}
-
 		while (retry-- > 0) {
 			rc = hisi_sas_softreset_ata_disk(device);
 			if (!rc)
@@ -768,15 +756,19 @@ static int hisi_sas_init_device(struct domain_device *device)
 
 int hisi_sas_slave_alloc(struct scsi_device *sdev)
 {
-	struct domain_device *ddev;
+	struct domain_device *ddev = sdev_to_domain_dev(sdev);
+	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
 	int rc;
 
 	rc = sas_slave_alloc(sdev);
 	if (rc)
 		return rc;
-	ddev = sdev_to_domain_dev(sdev);
 
-	return hisi_sas_init_device(ddev);
+	rc = hisi_sas_init_device(ddev);
+	if (rc)
+		return rc;
+	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
 
@@ -826,7 +818,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
 	dev_info(dev, "dev[%d:%x] found\n",
 		 sas_dev->device_id, sas_dev->dev_type);
 
-	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 	return 0;
 
 err_out:
@@ -1710,12 +1701,17 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
 		/* report PHY down if timed out */
 		if (rc == -ETIMEDOUT)
 			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
-	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
-		/*
-		 * If in init state, we rely on caller to wait for link to be
-		 * ready; otherwise, except phy reset is fail, delay.
-		 */
-		if (!rc)
-			return rc;
-	}
+	}
+
+	if (rc)
+		return rc;
+
+	/* Remote phy */
+	if (dev_is_sata(device)) {
+		rc = sas_ata_wait_after_reset(device,
+					      HISI_SAS_WAIT_PHYUP_TIMEOUT);
+	} else {
+		msleep(2000);
+	}
 
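The hisi_sas_task_deliver() hunk inserts smp_wmb() so that all slot initialization becomes visible before slot->ready is set; the consumer must pair it with a read barrier (or an acquire read) after observing ready != 0, otherwise it may see the flag but stale slot contents. A sketch of the pairing with a hypothetical slot struct:

	#include <asm/barrier.h>

	struct ex_slot {
		int data;
		int ready;
	};

	static void producer(struct ex_slot *slot, int v)
	{
		slot->data = v;
		smp_wmb();			/* order data before the ready flag */
		WRITE_ONCE(slot->ready, 1);
	}

	static int consumer(struct ex_slot *slot)
	{
		if (!READ_ONCE(slot->ready))
			return -1;
		smp_rmb();			/* pairs with the producer's smp_wmb() */
		return slot->data;
	}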
@@ -1563,9 +1563,15 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 
 	phy->port_id = port_id;
 
-	/* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */
+	/*
+	 * Call pm_runtime_get_noresume() which pairs with
+	 * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
+	 * For failure call pm_runtime_put() as we are in a hardirq context.
+	 */
 	pm_runtime_get_noresume(dev);
-	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+	res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+	if (!res)
+		pm_runtime_put(dev);
 
 	res = IRQ_HANDLED;
 
@@ -229,10 +229,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	if (error)
 		goto fail;
 
-	error = scsi_mq_setup_tags(shost);
-	if (error)
-		goto fail;
-
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
 	if (!dma_dev)
@@ -240,6 +236,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
 	shost->dma_dev = dma_dev;
 
+	error = scsi_mq_setup_tags(shost);
+	if (error)
+		goto fail;
+
 	/*
 	 * Increase usage count temporarily here so that calling
 	 * scsi_autopm_put_host() will trigger runtime idle if there is
@@ -3456,7 +3456,7 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	unsigned long lock_flags = 0;
@@ -4182,7 +4182,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
 				      struct bin_attribute *bin_attr, char *buf,
 				      loff_t off, size_t count)
 {
-	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct device *cdev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	struct ipr_hostrcb *hostrcb;
@@ -4206,7 +4206,7 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
 				      struct bin_attribute *bin_attr, char *buf,
 				      loff_t off, size_t count)
 {
-	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct device *cdev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	struct ipr_hostrcb *hostrcb;
@@ -4267,7 +4267,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t off, size_t count)
 {
-	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct device *cdev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	struct ipr_dump *dump;
@@ -4456,7 +4456,7 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t off, size_t count)
 {
-	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct device *cdev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	int rc;
@@ -10092,7 +10092,6 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
 	unsigned long lock_flags = 0;
-	irqreturn_t rc = IRQ_HANDLED;
 
 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10101,7 +10100,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
 	wake_up(&ioa_cfg->msi_wait_q);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	return rc;
+	return IRQ_HANDLED;
 }
 
 /**
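kobj_to_dev() is a typed wrapper around the container_of() idiom for the kobject embedded in struct device, so the conversion reads as intent rather than pointer arithmetic. A sketch of the equivalence (hypothetical attribute callback):

	#include <linux/device.h>

	static struct device *ex_attr_to_dev(struct kobject *kobj)
	{
		/* Open-coded form the hunks above replace:
		 *   return container_of(kobj, struct device, kobj);
		 */
		return kobj_to_dev(kobj);	/* same result, clearer */
	}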
@@ -825,10 +825,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 	}
 	memset(ep, 0, sizeof(*ep));
 
-	cpu = get_cpu();
+	cpu = raw_smp_processor_id();
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
-	put_cpu();
 
 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
@@ -143,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
 		INIT_LIST_HEAD(&fsp->list);
 		spin_lock_init(&fsp->scsi_pkt_lock);
 	} else {
-		per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
-		put_cpu();
+		this_cpu_inc(lport->stats->FcpPktAllocFails);
 	}
 	return fsp;
 }
@@ -266,8 +265,7 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 	if (!fsp->seq_ptr)
 		return -EINVAL;
 
-	per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
-	put_cpu();
+	this_cpu_inc(fsp->lp->stats->FcpPktAborts);
 
 	fsp->state |= FC_SRB_ABORT_PENDING;
 	rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
@@ -436,8 +434,7 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	if (likely(fp))
 		return fp;
 
-	per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
-	put_cpu();
+	this_cpu_inc(lport->stats->FcpFrameAllocFails);
 	/* error case */
 	fc_fcp_can_queue_ramp_down(lport);
 	shost_printk(KERN_ERR, lport->host,
@@ -471,7 +468,6 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	struct scsi_cmnd *sc = fsp->cmd;
 	struct fc_lport *lport = fsp->lp;
-	struct fc_stats *stats;
 	struct fc_frame_header *fh;
 	size_t start_offset;
 	size_t offset;
@@ -533,14 +529,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
 		if (~crc != le32_to_cpu(fr_crc(fp))) {
 crc_err:
-			stats = per_cpu_ptr(lport->stats, get_cpu());
-			stats->ErrorFrames++;
+			this_cpu_inc(lport->stats->ErrorFrames);
 			/* per cpu count, not total count, but OK for limit */
-			if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
+			if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
 				printk(KERN_WARNING "libfc: CRC error on data "
 				       "frame for port (%6.6x)\n",
 				       lport->port_id);
-			put_cpu();
 			/*
 			 * Assume the frame is total garbage.
 			 * We may have copied it over the good part
@@ -1861,7 +1855,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	struct fc_fcp_pkt *fsp;
 	int rval;
 	int rc = 0;
-	struct fc_stats *stats;
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
@@ -1913,20 +1906,18 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	/*
 	 * setup the data direction
 	 */
-	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
-		stats->InputRequests++;
-		stats->InputBytes += fsp->data_len;
+		this_cpu_inc(lport->stats->InputRequests);
+		this_cpu_add(lport->stats->InputBytes, fsp->data_len);
 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		fsp->req_flags = FC_SRB_WRITE;
-		stats->OutputRequests++;
-		stats->OutputBytes += fsp->data_len;
+		this_cpu_inc(lport->stats->OutputRequests);
+		this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
 	} else {
 		fsp->req_flags = 0;
-		stats->ControlRequests++;
+		this_cpu_inc(lport->stats->ControlRequests);
 	}
-	put_cpu();
 
 	/*
 	 * send it to the lower layer
@@ -308,21 +308,21 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)

stats = per_cpu_ptr(lport->stats, cpu);

fc_stats->tx_frames += stats->TxFrames;
fc_stats->tx_words += stats->TxWords;
fc_stats->rx_frames += stats->RxFrames;
fc_stats->rx_words += stats->RxWords;
fc_stats->error_frames += stats->ErrorFrames;
fc_stats->invalid_crc_count += stats->InvalidCRCCount;
fc_stats->fcp_input_requests += stats->InputRequests;
fc_stats->fcp_output_requests += stats->OutputRequests;
fc_stats->fcp_control_requests += stats->ControlRequests;
fcp_in_bytes += stats->InputBytes;
fcp_out_bytes += stats->OutputBytes;
fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
fc_stats->link_failure_count += stats->LinkFailureCount;
fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
fc_stats->tx_words += READ_ONCE(stats->TxWords);
fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
fc_stats->rx_words += READ_ONCE(stats->RxWords);
fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
fcp_in_bytes += READ_ONCE(stats->InputBytes);
fcp_out_bytes += READ_ONCE(stats->OutputBytes);
fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
}
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
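On the aggregation side, each per-CPU counter is now loaded with READ_ONCE() so the compiler cannot tear or refetch the 64-bit load while writers update the counter concurrently. A sketch of the summation loop, reusing the toy structures above:

```c
static u64 demo_sum_crc_errors(struct demo_port *p)
{
	u64 sum = 0;
	int cpu;

	/* Visit every CPU's private copy and accumulate a stable snapshot. */
	for_each_possible_cpu(cpu)
		sum += READ_ONCE(per_cpu_ptr(p->stats, cpu)->crc_errors);

	return sum;
}
```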
@@ -358,22 +358,14 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
return r;
}

static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
int ret = 0, res;
struct sas_phy *phy;
struct ata_port *ap = link->ap;
struct sata_device *sata_dev = &dev->sata_dev;
int (*check_ready)(struct ata_link *link);
struct domain_device *dev = ap->private_data;
struct sas_internal *i = dev_to_sas_internal(dev);

res = i->dft->lldd_I_T_nexus_reset(dev);
if (res == -ENODEV)
return res;

if (res != TMF_RESP_FUNC_COMPLETE)
sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
struct ata_port *ap = sata_dev->ap;
struct ata_link *link = &ap->link;
struct sas_phy *phy;
int ret;

phy = sas_get_local_phy(dev);
if (scsi_is_sas_phy_local(phy))
@@ -386,6 +378,27 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);

return ret;
}
EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset);

static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_internal *i = dev_to_sas_internal(dev);
int ret;

ret = i->dft->lldd_I_T_nexus_reset(dev);
if (ret == -ENODEV)
return ret;

if (ret != TMF_RESP_FUNC_COMPLETE)
sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");

ret = sas_ata_wait_after_reset(dev, deadline);

*class = dev->sata_dev.class;

ap->cbl = ATA_CBL_SATA;
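The hunk above factors the wait-for-ready logic out of sas_ata_hard_reset() into an exported helper so LLDDs can reuse it from their own I_T nexus reset paths. A hedged sketch of such a caller; demo_hw_reset() is hypothetical, while sas_ata_wait_after_reset() and dev_is_sata() are the real libsas symbols:

```c
static int demo_lldd_I_T_nexus_reset(struct domain_device *dev)
{
	/* Hypothetical hardware-specific reset performed by the LLDD. */
	int rc = demo_hw_reset(dev);

	if (rc)
		return rc;

	/* For SATA devices, reuse the common libsas wait logic instead
	 * of open-coding the link-ready polling.
	 */
	if (dev_is_sata(dev))
		rc = sas_ata_wait_after_reset(dev,
				jiffies + msecs_to_jiffies(5000));
	return rc;
}
```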
@@ -604,6 +604,7 @@ struct lpfc_vport {
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
#define FC_RSCN_MEMENTO 0x4000000/* RSCN cmd processed */

uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -611,6 +612,7 @@ struct lpfc_vport {
#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */

struct list_head fc_nodes;

@@ -713,6 +715,7 @@ struct lpfc_vport {
#define LPFC_VMID_QFPA_CMPL 0x4
#define LPFC_VMID_QOS_ENABLED 0x8
#define LPFC_VMID_TIMER_ENBLD 0x10
#define LPFC_VMID_TYPE_PRIO 0x20
struct fc_qfpa_res *qfpa_res;

struct fc_vport *fc_vport;
@@ -738,9 +741,8 @@ struct lpfc_vport {
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
#define STATIC_VPORT 1
#define FAWWPN_SET 2
#define FAWWPN_PARAM_CHG 4
#define STATIC_VPORT 0x1
#define FAWWPN_PARAM_CHG 0x2

uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
@@ -1025,6 +1027,7 @@ struct lpfc_hba {
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */

uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -1057,6 +1060,7 @@ struct lpfc_hba {
#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */

struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
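The vport_flag hunk drops the unused FAWWPN_SET bit and renumbers the survivors in hex, which makes the one-bit-per-flag layout explicit. A trivial sketch of testing such mask bits (constants mirror the new defines but are prefixed to mark them as illustrative):

```c
#define DEMO_STATIC_VPORT	0x1
#define DEMO_FAWWPN_PARAM_CHG	0x2

static bool demo_fawwpn_changed(u32 vport_flag)
{
	/* Each flag occupies its own bit, so a plain AND tests it. */
	return vport_flag & DEMO_FAWWPN_PARAM_CHG;
}
```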
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1120,14 +1120,24 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
if (vport->fc_flag & FC_FABRIC)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
if (vport->fc_flag & FC_FABRIC) {
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag &
LPFC_FAWWPN_FABRIC)
len += scnprintf(buf + len,
PAGE_SIZE - len,
" Fabric FA-PWWN\n");
else
len += scnprintf(buf + len,
PAGE_SIZE - len,
" Fabric\n");
} else {
len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
}
}
}

if ((phba->sli_rev == LPFC_SLI_REV4) &&
((bf_get(lpfc_sli_intf_if_type,
@@ -6878,17 +6888,34 @@ lpfc_get_stats(struct Scsi_Host *shost)
memset(hs, 0, sizeof (struct fc_host_statistics));

hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;

/*
* The MBX_READ_STATUS returns tx_k_bytes which has to
* converted to words
* The MBX_READ_STATUS returns tx_k_bytes which has to be
* converted to words.
*
* Check if extended byte flag is set, to know when to collect upper
* bits of 64 bit wide statistics counter.
*/
if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
hs->tx_words = (u64)
((((u64)(pmb->un.varRdStatus.xmit_xkb &
RD_ST_XMIT_XKB_MASK) << 32) |
(u64)pmb->un.varRdStatus.xmitByteCnt) *
(u64)256);
hs->rx_words = (u64)
((((u64)(pmb->un.varRdStatus.rcv_xkb &
RD_ST_RCV_XKB_MASK) << 32) |
(u64)pmb->un.varRdStatus.rcvByteCnt) *
(u64)256);
} else {
hs->tx_words = (uint64_t)
((uint64_t)pmb->un.varRdStatus.xmitByteCnt
* (uint64_t)256);
hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
hs->rx_words = (uint64_t)
((uint64_t)pmb->un.varRdStatus.rcvByteCnt
* (uint64_t)256);
}

memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
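The lpfc_get_stats() hunk widens the transmit/receive word counters: when the extended-byte flag (RD_ST_XKB) is set, the upper 32 bits arrive in a separate field and are combined with the lower half before the KB-to-words conversion (1 KiB = 1024 bytes = 256 four-byte FC words, hence the multiply by 256). A sketch of the arithmetic with hypothetical parameter names:

```c
static inline u64 demo_kb_to_words(u32 upper_kb, u32 lower_kb)
{
	/* Combine the split 64-bit kilobyte count, then convert:
	 * 1024 bytes per KiB / 4 bytes per FC word = 256 words per KiB.
	 */
	return (((u64)upper_kb << 32) | lower_kb) * 256ULL;
}
```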
@@ -310,7 +310,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4, total_data_placed;

dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;

/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -328,10 +328,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);

iocb = &dd_data->context_un.iocb;
ndlp = iocb->cmdiocbq->context_un.ndlp;
ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;
ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
total_data_placed = get_job_data_placed(phba, rspiocbq);
@@ -470,14 +470,12 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)

cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->cmd_dmabuf = cmp;
cmdiocbq->bpl_dmabuf = bmp;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;

cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.dd_data = dd_data;

dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
@@ -495,8 +493,8 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}

cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->context_un.ndlp) {
cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->ndlp) {
rc = -ENODEV;
goto free_rmp;
}
@@ -573,9 +571,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4, total_data_placed;

dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
cmdiocbq->context1 = ndlp;
cmdiocbq->ndlp = ndlp;

/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -595,7 +593,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
total_data_placed = get_job_data_placed(phba, rspiocbq);
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;

/* Copy the completed job data or determine the job status if job is
@@ -711,8 +709,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
/* Transfer the request payload to allocated command dma buffer */
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
job->request_payload.payload_len);
cmdiocbq->cmd_dmabuf->virt,
cmdsize);

rpi = ndlp->nlp_rpi;

@@ -722,8 +720,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
cmdiocbq->context_un.dd_data = dd_data;
cmdiocbq->ndlp = ndlp;
cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
@@ -742,12 +740,6 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}

cmdiocbq->context1 = lpfc_nlp_get(ndlp);
if (!cmdiocbq->context1) {
rc = -EIO;
goto linkdown_err;
}

rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
@@ -917,8 +909,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
@@ -985,9 +977,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;

bdeBuf1 = iocbq->cmd_dmabuf;
bdeBuf2 = iocbq->bpl_dmabuf;
}
if (phba->sli_rev == LPFC_SLI_REV4)
bde_count = iocbq->wcqe_cmpl.word3;
@@ -1384,7 +1375,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4;

dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;

/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1401,8 +1392,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);

ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;

ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
@@ -1529,10 +1520,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,

ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
ctiocb->context_un.dd_data = dd_data;
ctiocb->cmd_dmabuf = cmp;
ctiocb->bpl_dmabuf = bmp;
ctiocb->ndlp = ndlp;
ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;

dd_data->type = TYPE_IOCB;
@@ -2671,7 +2662,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;

cmdiocbq->context3 = dmabuf;
cmdiocbq->bpl_dmabuf = dmabuf;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
@@ -3231,7 +3222,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
cmdiocbq->context3 = txbmp;
cmdiocbq->bpl_dmabuf = txbmp;

if (phba->sli_rev < LPFC_SLI_REV4) {
lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
@@ -3384,7 +3375,7 @@ job_error:
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
* will wake up thread waiting on the wait queue pointed by context1
* will wake up thread waiting on the wait queue pointed by dd_data
* of the mailbox.
**/
static void
@@ -5034,9 +5025,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
unsigned int rsp_size;
int rc = 0;

dd_data = cmdiocbq->context1;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
dd_data = cmdiocbq->context_un.dd_data;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;
menlo = &dd_data->context_un.menlo;
rmp = menlo->rmp;
rsp = &rspiocbq->iocb;
@@ -5233,9 +5224,9 @@ lpfc_menlo_cmd(struct bsg_job *job)
/* We want the firmware to timeout before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.dd_data = dd_data;
cmdiocbq->cmd_dmabuf = cmp;
cmdiocbq->bpl_dmabuf = bmp;
if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->ulpPU = MENLO_PU; /* 3 */
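The lpfc_bsg.c hunks are one instance of a rename applied across the driver: the iocb's generic context1/context2/context3 void-pointer slots give way to named members (context_un.dd_data, cmd_dmabuf, bpl_dmabuf, rsp_dmabuf, ndlp), so each assignment states its meaning instead of relying on per-caller convention. A toy before/after sketch; the struct and field names here are illustrative, not the lpfc definitions:

```c
/* Before: the meaning of each slot depends on which path filled it. */
struct demo_iocb_old {
	void *context1;		/* dd_data?  ndlp?  caller-dependent */
	void *context2;		/* usually the command buffer */
	void *context3;		/* usually the buffer pointer list */
};

/* After: one named member per role. */
struct demo_iocb_new {
	struct demo_dmabuf *cmd_dmabuf;	/* command payload */
	struct demo_dmabuf *rsp_dmabuf;	/* response payload */
	struct demo_dmabuf *bpl_dmabuf;	/* buffer pointer list */
	struct demo_node *ndlp;		/* remote node reference */
	void *dd_data;			/* driver job data */
};
```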
@@ -32,7 +32,9 @@ int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);

int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox);
void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
enum lpfc_mbox_ctx locked);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -432,6 +434,7 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);

void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
void lpfc_setup_fdmi_mask(struct lpfc_vport *vport);
int lpfc_link_reset(struct lpfc_vport *vport);

/* Function prototypes. */
@@ -118,22 +118,22 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *mp, *bmp;

ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
ndlp = cmdiocb->ndlp;
if (ndlp)
lpfc_nlp_put(ndlp);

mp = cmdiocb->context2;
bmp = cmdiocb->context3;
mp = cmdiocb->rsp_dmabuf;
bmp = cmdiocb->bpl_dmabuf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
cmdiocb->context2 = NULL;
cmdiocb->rsp_dmabuf = NULL;
}

if (bmp) {
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
cmdiocb->context3 = NULL;
cmdiocb->bpl_dmabuf = NULL;
}

lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -232,18 +232,17 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
}

/* Save for completion so we can release these resources */
cmdiocbq->context2 = (uint8_t *)mp;
cmdiocbq->context3 = (uint8_t *)bmp;
cmdiocbq->rsp_dmabuf = mp;
cmdiocbq->bpl_dmabuf = bmp;
cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
tmo = (3 * phba->fc_ratov);

cmdiocbq->retry = 0;
cmdiocbq->vport = vport;
cmdiocbq->context_un.ndlp = NULL;
cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;

cmdiocbq->context1 = lpfc_nlp_get(ndlp);
if (!cmdiocbq->context1)
cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->ndlp)
goto ct_no_ndlp;

rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
@@ -310,8 +309,8 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
return;
}

ct_req = ((struct lpfc_sli_ct_request *)
(((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
@@ -347,14 +346,14 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t size;
struct list_head head;
struct lpfc_sli_ct_request *ct_req;
struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
u32 status, parameter, bde_count = 0;
struct lpfc_wcqe_complete *wcqe_cmpl = NULL;

ctiocbq->context1 = NULL;
ctiocbq->context2 = NULL;
ctiocbq->context3 = NULL;
ctiocbq->cmd_dmabuf = NULL;
ctiocbq->rsp_dmabuf = NULL;
ctiocbq->bpl_dmabuf = NULL;

wcqe_cmpl = &ctiocbq->wcqe_cmpl;
status = get_job_ulpstatus(phba, ctiocbq);
@@ -382,12 +381,11 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (bde_count == 0)
return;

ctiocbq->context2 = bdeBuf1;
ctiocbq->cmd_dmabuf = bdeBuf1;
if (bde_count == 2)
ctiocbq->context3 = bdeBuf2;
ctiocbq->bpl_dmabuf = bdeBuf2;

ct_req = ((struct lpfc_sli_ct_request *)
(((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
@@ -408,8 +406,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,

if (!bde_count)
continue;
bdeBuf1 = iocb->context2;
iocb->context2 = NULL;
bdeBuf1 = iocb->cmd_dmabuf;
iocb->cmd_dmabuf = NULL;
if (phba->sli_rev == LPFC_SLI_REV4)
size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
else
@@ -417,8 +415,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
lpfc_in_buf_free(phba, bdeBuf1);
if (bde_count == 2) {
bdeBuf2 = iocb->context3;
iocb->context3 = NULL;
bdeBuf2 = iocb->bpl_dmabuf;
iocb->bpl_dmabuf = NULL;
if (phba->sli_rev == LPFC_SLI_REV4)
size = iocb->unsol_rcv_len;
else
@@ -549,24 +547,25 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;

/* I/O job is complete so context is now invalid*/
ctiocb->context_un.ndlp = NULL;
if (ctiocb->context1) {
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
/* IOCBQ job structure gets cleaned during release. Just release
* the dma buffers here.
*/
if (ctiocb->cmd_dmabuf) {
buf_ptr = ctiocb->cmd_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
ctiocb->context1 = NULL;
ctiocb->cmd_dmabuf = NULL;
}
if (ctiocb->context2) {
lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
ctiocb->context2 = NULL;
if (ctiocb->rsp_dmabuf) {
lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
ctiocb->rsp_dmabuf = NULL;
}

if (ctiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
if (ctiocb->bpl_dmabuf) {
buf_ptr = ctiocb->bpl_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
ctiocb->context3 = NULL;
ctiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
@@ -605,11 +604,11 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Update the num_entry bde count */
geniocb->num_bdes = num_entry;

geniocb->context3 = (uint8_t *) bmp;
geniocb->bpl_dmabuf = bmp;

/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
geniocb->cmd_dmabuf = inp;
geniocb->rsp_dmabuf = outp;

geniocb->event_tag = event_tag;

@@ -635,8 +634,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
if (!geniocb->context_un.ndlp)
geniocb->ndlp = lpfc_nlp_get(ndlp);
if (!geniocb->ndlp)
goto out;

rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
@@ -926,13 +925,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int rc, type;

/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
ndlp = cmdiocb->ndlp;

/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;

inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
cmdiocb->rsp_iocb = rspiocb;
inp = cmdiocb->cmd_dmabuf;
outp = cmdiocb->rsp_dmabuf;

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
@@ -962,9 +960,15 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
"0226 NS query failed due to link event: "
"ulp_status x%x ulp_word4 x%x fc_flag x%x "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}

@@ -1143,12 +1147,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int rc;

/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
ndlp = cmdiocb->ndlp;

/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
inp = (struct lpfc_dmabuf *)cmdiocb->context1;
outp = (struct lpfc_dmabuf *)cmdiocb->context2;
cmdiocb->rsp_iocb = rspiocb;
inp = cmdiocb->cmd_dmabuf;
outp = cmdiocb->rsp_dmabuf;

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_PT cmpl: status:x%x/x%x rtry:%d",
@@ -1179,9 +1183,15 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4166 NS query failed due to link event\n");
"4166 NS query failed due to link event: "
"ulp_status x%x ulp_word4 x%x fc_flag x%x "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}

@@ -1346,8 +1356,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
@@ -1426,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
free_ndlp = cmdiocb->context_un.ndlp;
free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
@@ -1483,7 +1493,7 @@ out:
}

iocb_free:
free_ndlp = cmdiocb->context_un.ndlp;
free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
@@ -1494,8 +1504,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp = NULL;
@@ -1519,7 +1529,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}

/* Preserve the nameserver node to release the reference. */
ns_ndlp = cmdiocb->context_un.ndlp;
ns_ndlp = cmdiocb->ndlp;

if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
@@ -1605,13 +1615,13 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
u32 ulp_word4 = get_job_word4(phba, rspiocb);

/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
ndlp = cmdiocb->ndlp;

/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
cmdiocb->rsp_iocb = rspiocb;

inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
inp = cmdiocb->cmd_dmabuf;
outp = cmdiocb->rsp_dmabuf;

cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
@@ -1672,8 +1682,8 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;

outp = (struct lpfc_dmabuf *) cmdiocb->context2;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFT_ID;
@@ -1693,7 +1703,7 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;

outp = (struct lpfc_dmabuf *) cmdiocb->context2;
outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1714,8 +1724,8 @@ lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;

outp = (struct lpfc_dmabuf *) cmdiocb->context2;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RSPN_ID;
@@ -1735,7 +1745,7 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;

outp = (struct lpfc_dmabuf *) cmdiocb->context2;
outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1768,8 +1778,8 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;

outp = (struct lpfc_dmabuf *) cmdiocb->context2;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFF_ID;
@@ -1865,7 +1875,7 @@ lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
struct lpfc_dmabuf *mp;
uint32_t type;

mp = cmdiocb->context1;
mp = cmdiocb->cmd_dmabuf;
if (mp == NULL)
return 0;
CtReq = (struct lpfc_sli_ct_request *)mp->virt;
@@ -2018,28 +2028,30 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);

/* Register Application Services type if vmid enabled. */
if (phba->cfg_vmid_app_header)
CtReq->un.rft.app_serv_reg =
cpu_to_be32(RFT_APP_SERV_REG);

/* Register FC4 FCP type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
CtReq->un.rft.fcpReg = 1;
CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);

/* Register NVME type if enabled. Defined LE and swapped.
* rsvd[0] is used as word1 because of the hard-coded
* word0 usage in the ct_request data structure.
*/
/* Register NVME type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
CtReq->un.rft.rsvd[0] =
cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);

ptr = (uint32_t *)CtReq;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6433 Issue RFT (%s %s): %08x %08x %08x %08x "
"%08x %08x %08x %08x\n",
CtReq->un.rft.fcpReg ? "FCP" : " ",
CtReq->un.rft.rsvd[0] ? "NVME" : " ",
"6433 Issue RFT (%s %s %s): %08x %08x %08x "
"%08x %08x %08x %08x %08x\n",
CtReq->un.rft.fcp_reg ? "FCP" : " ",
CtReq->un.rft.nvme_reg ? "NVME" : " ",
CtReq->un.rft.app_serv_reg ? "APPS" : " ",
*ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
*(ptr + 4), *(ptr + 5),
*(ptr + 6), *(ptr + 7));
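The RFT_ID hunk also changes how the FC4-type registration bits are written: rather than storing a host-order 1 into fcpReg, the new fields take cpu_to_be32() constants, making the big-endian wire format of the CT payload explicit. A sketch with an illustrative flag value (the real bit definitions live in the lpfc headers):

```c
static void demo_fill_rft_fcp(__be32 *fcp_reg, bool fcp_enabled)
{
	/* CT payload fields travel big-endian on the wire, so byte-swap
	 * the host-order constant explicitly rather than assigning it raw.
	 */
	*fcp_reg = fcp_enabled ? cpu_to_be32(0x1) : 0;
}
```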
@@ -2155,6 +2167,41 @@ ns_cmd_exit:
return 1;
}

/**
* lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
* @phba: Pointer to HBA context object.
* @mask: Initial port attributes mask
*
* This function checks to see if any vports have deferred their FDMI RPRT.
* A vports RPRT may be deferred if it is issued before the primary ports
* RHBA completes.
*/
static void
lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
{
struct lpfc_vport **vports;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
int i;

phba->hba_flag |= HBA_RHBA_CMPL;
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
vport = vports[i];
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
continue;
if (vport->ct_flags & FC_CT_RPRT_DEFER) {
vport->ct_flags &= ~FC_CT_RPRT_DEFER;
vport->fdmi_port_mask = mask;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
}
}
}
lpfc_destroy_vport_work_array(phba, vports);
}

/**
* lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
* @phba: Pointer to HBA context object.
@@ -2169,8 +2216,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp = cmdiocb->context1;
struct lpfc_dmabuf *outp = cmdiocb->context2;
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTcmd = inp->virt;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
@@ -2224,7 +2271,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_word4);
}

free_ndlp = cmdiocb->context_un.ndlp;
free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);

@@ -2236,15 +2283,19 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = be16_to_cpu(fdmi_cmd);
if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
"0220 FDMI cmd failed FS_RJT Data: x%x", cmd);

/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
case SLI_MGMT_RHBA:
if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
/* Fallback to FDMI-1 */
/* Fallback to FDMI-1 for HBA attributes */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;

/* If HBA attributes are FDMI1, so should
* port attributes be for consistency.
*/
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
@@ -2252,6 +2303,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;

case SLI_MGMT_RPRT:
if (vport->port_type != LPFC_PHYSICAL_PORT) {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
@@ -2272,9 +2328,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->link_flag &= ~LS_CT_VEN_RPA;
if (phba->cmf_active_mode == LPFC_CFG_OFF)
return;
lpfc_printf_log(phba, KERN_ERR,
lpfc_printf_log(phba, KERN_WARNING,
LOG_DISCOVERY | LOG_ELS,
"6460 VEN FDMI RPA failure\n");
"6460 VEN FDMI RPA RJT\n");
return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
@@ -2301,6 +2357,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (cmd) {
case SLI_MGMT_RHBA:
/* Check for any RPRTs deferred till after RHBA completes */
lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);

lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
break;

@@ -2309,10 +2368,26 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;

case SLI_MGMT_DPRT:
if (vport->port_type == LPFC_PHYSICAL_PORT)
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
else
} else {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return;

/* Only issue a RPRT for the vport if the RHBA
* for the physical port completes successfully.
* We may have to defer the RPRT accordingly.
*/
if (phba->hba_flag & HBA_RHBA_CMPL) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
} else {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"6078 RPRT deferred\n");
vport->ct_flags |= FC_CT_RPRT_DEFER;
}
}
break;
case SLI_MGMT_RPA:
if (vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -2327,7 +2402,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}

lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY | LOG_CGN_MGMT,
"6210 Issue Vendor MI FDMI %x\n",
phba->sli4_hba.pc_sli4_params.mi_ver);

@@ -2396,6 +2472,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
phba->link_flag &= ~LS_CT_VEN_RPA;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
} else {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
}

@@ -2417,6 +2496,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
} else {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
LPFC_FDMI_PORT_ATTR_num_disc);
}
@@ -2830,11 +2912,38 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
u32 tcfg;
u8 i, cnt;

ae = &ad->AttrValue;

ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
cnt = 0;
if (phba->sli_rev == LPFC_SLI_REV4) {
tcfg = phba->sli4_hba.conf_trunk;
for (i = 0; i < 4; i++, tcfg >>= 1)
if (tcfg & 1)
cnt++;
}

if (cnt > 2) { /* 4 lane trunk group */
if (phba->lmt & LMT_64Gb)
ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_32Gb)
ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_16Gb)
ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
} else if (cnt) { /* 2 lane trunk group */
if (phba->lmt & LMT_128Gb)
ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_64Gb)
ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_32Gb)
ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_16Gb)
ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
} else {
if (phba->lmt & LMT_256Gb)
ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_128Gb)
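The supported-speed hunk counts configured trunk lanes by walking conf_trunk as a 4-bit mask; the count then selects which link-speed bits to advertise, since a 2- or 4-lane trunk multiplies the per-lane rate. The loop is a hand-rolled population count, equivalent to the kernel's hweight helpers:

```c
#include <linux/bitops.h>

static u8 demo_trunk_lanes(u32 tcfg)
{
	u8 i, cnt = 0;

	/* Same shape as the loop above: test and shift each lane bit... */
	for (i = 0; i < 4; i++, tcfg >>= 1)
		if (tcfg & 1)
			cnt++;

	/* ...equivalent to: hweight8(tcfg & 0xf) */
	return cnt;
}
```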
@@ -2855,6 +2964,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
if (phba->lmt & LMT_1Gb)
ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
}
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
@@ -3125,6 +3235,7 @@ static int
lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;

@@ -3135,7 +3246,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */

/* Check to see if NVME is configured or not */
if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
if (vport == phba->pport &&
phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */

size = FOURBYTES + 32;
@@ -3459,8 +3571,10 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0218 FDMI Request Data: x%x x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
"0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
cmdcode, new_mask, vport->fdmi_port_mask,
vport->fc_flag, vport->port_state);

CtReq = (struct lpfc_sli_ct_request *)mp->virt;

/* First populate the CT_IU preamble */
@@ -3529,6 +3643,12 @@ hba_out:
break;

case SLI_MGMT_RPRT:
if (vport->port_type != LPFC_PHYSICAL_PORT) {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return 0;
}
fallthrough;
case SLI_MGMT_RPA:
pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
if (cmdcode == SLI_MGMT_RPRT) {
@@ -3593,6 +3713,12 @@ port_out:
rsp_size = FC_MAX_NS_RSP;
fallthrough;
case SLI_MGMT_DPRT:
if (vport->port_type != LPFC_PHYSICAL_PORT) {
ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
if (!ndlp)
return 0;
}
fallthrough;
case SLI_MGMT_DPA:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
memcpy((uint8_t *)&pe->PortName,
@@ -3780,8 +3906,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp = cmdiocb->context1;
struct lpfc_dmabuf *outp = cmdiocb->context2;
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *ctcmd = inp->virt;
struct lpfc_sli_ct_request *ctrsp = outp->virt;
u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
[File diff suppressed because it is too large]
@@ -1183,6 +1183,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
@@ -1200,6 +1201,13 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
vport->fc_flag &= ~FC_DISC_DELAYED;
spin_unlock_irq(shost->host_lock);
del_timer_sync(&vport->delayed_disc_tmo);

if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
/* Assume success on link up */
phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
}
}

int
@@ -1221,6 +1229,9 @@ lpfc_linkdown(struct lpfc_hba *phba)

phba->defer_flogi_acc_flag = false;

/* Clear external loopback plug detected flag */
phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
@@ -1343,10 +1354,12 @@ lpfc_linkup_port(struct lpfc_vport *vport)

spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
FC_RSCN_MEMENTO | FC_RSCN_MODE |
FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
lpfc_setup_fdmi_mask(vport);

lpfc_linkup_cleanup_nodes(vport);
}
@@ -1378,8 +1391,8 @@ lpfc_linkup(struct lpfc_hba *phba)
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);

/* reinitialize initial FLOGI flag */
phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
/* reinitialize initial HBA flag */
phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
phba->defer_flogi_acc_flag = false;

return 0;
@@ -1458,7 +1471,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
LPFC_MBOXQ_t *sparam_mb;
struct lpfc_dmabuf *sparam_mp;
u16 status = pmb->u.mb.mbxStatus;
int rc;

@@ -1507,13 +1519,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
sparam_mp = (struct lpfc_dmabuf *)
sparam_mb->ctx_buf;
lpfc_mbuf_free(phba, sparam_mp->virt,
sparam_mp->phys);
kfree(sparam_mp);
sparam_mb->ctx_buf = NULL;
mempool_free(sparam_mb, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
MBOX_THD_UNLOCKED);
goto sparam_out;
}

@@ -3312,7 +3319,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

@@ -3393,12 +3399,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}

out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
if (dmabuf) {
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
return;
lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}

static void
@@ -3443,9 +3444,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}

lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

/* Check if sending the FLOGI is being deferred to after we get
* up to date CSPs from MBX_READ_SPARAM.
@@ -3457,12 +3456,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;

out:
pmb->ctx_buf = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_issue_clear_la(phba, vport);
mempool_free(pmb, phba->mbox_mem_pool);
return;
}

static void
@@ -3472,7 +3467,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
struct Scsi_Host *shost;
int i;
struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
@@ -3600,10 +3594,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(sparam_mbox, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
goto out;
}

@@ -3879,10 +3870,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}

lpfc_mbx_cmpl_read_topology_free_mbuf:
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
return;
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/*
@@ -3895,9 +3883,13 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

/* The driver calls the state machine with the pmb pointer
* but wants to make sure a stale ctx_buf isn't acted on.
* The ctx_buf is restored later and cleaned up.
*/
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;

@@ -3934,10 +3926,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)

/* Call state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
pmb->ctx_buf = mp;
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
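Most of the lpfc_hbadisc.c churn above and below is one mechanical consolidation: every open-coded "free the mbuf, kfree the dmabuf, mempool_free the mailbox" sequence collapses into lpfc_mbox_rsrc_cleanup(), whose prototype appears in the lpfc_crtn.h hunk earlier. A hedged sketch of the shape of such a helper; the demo_* types and the mbuf-free call are illustrative, while mempool_free() and kfree() are the real kernel APIs:

```c
static void demo_mbox_rsrc_cleanup(struct demo_hba *hba,
				   struct demo_mboxq *mbox)
{
	struct demo_dmabuf *mp = mbox->ctx_buf;

	if (mp) {
		demo_mbuf_free(hba, mp->virt, mp->phys);	/* DMA buffer */
		kfree(mp);					/* wrapper */
		mbox->ctx_buf = NULL;
	}
	mempool_free(mbox, hba->mbox_mem_pool);		/* mailbox itself */
}
```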
@ -4104,11 +4095,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
|
||||
|
||||
vport_buff = (uint8_t *) vport_info;
|
||||
do {
|
||||
/* free dma buffer from previous round */
|
||||
/* While loop iteration forces a free dma buffer from
|
||||
* the previous loop because the mbox is reused and
|
||||
* the dump routine is a single-use construct.
|
||||
*/
|
||||
if (pmb->ctx_buf) {
|
||||
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
|
||||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
pmb->ctx_buf = NULL;
|
||||
}
|
||||
if (lpfc_dump_static_vport(phba, pmb, offset))
|
||||
goto out;
|
||||
@ -4193,16 +4188,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
|
||||
|
||||
out:
|
||||
kfree(vport_info);
|
||||
if (mbx_wait_rc != MBX_TIMEOUT) {
|
||||
if (pmb->ctx_buf) {
|
||||
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
|
||||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
}
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
}
|
||||
|
||||
return;
|
||||
if (mbx_wait_rc != MBX_TIMEOUT)
|
||||
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
|
||||
}

/*
@@ -4216,22 +4203,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
    struct lpfc_nodelist *ndlp;
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
    struct Scsi_Host *shost;

    ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
    pmb->ctx_ndlp = NULL;
    pmb->ctx_buf = NULL;

    if (mb->mbxStatus) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                 "0258 Register Fabric login error: 0x%x\n",
                 mb->mbxStatus);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
            /* FLOGI failed, use loop map to make discovery list */
            lpfc_disc_list_loopmap(vport);
@@ -4273,9 +4254,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_do_scr_ns_plogi(phba, vport);
    }

    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

    /* Drop the reference count from the mbox at the end after
     * all the current reference to the ndlp have been done.
@@ -4369,12 +4348,10 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
    struct lpfc_vport *vport = pmb->vport;
    int rc;

    pmb->ctx_buf = NULL;
    pmb->ctx_ndlp = NULL;
    vport->gidft_inp = 0;

@@ -4388,9 +4365,7 @@ out:
     * callback function.
     */
    lpfc_nlp_put(ndlp);
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

    /* If the node is not registered with the scsi or nvme
     * transport, remove the fabric node. The failed reg_login
@@ -4479,10 +4454,7 @@ out:
     * callback function.
     */
    lpfc_nlp_put(ndlp);
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);

    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
    return;
}

@@ -4496,13 +4468,9 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    struct lpfc_vport *vport = pmb->vport;
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
    struct lpfc_nodelist *ndlp;
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

    ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
    pmb->ctx_ndlp = NULL;
    pmb->ctx_buf = NULL;

    if (mb->mbxStatus) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                 "0933 %s: Register FC login error: 0x%x\n",
@@ -4526,9 +4494,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

out:
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

    /* Drop the reference count from the mbox at the end after
     * all the current reference to the ndlp have been done.
@@ -5155,7 +5121,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
    if (pring->ringno == LPFC_ELS_RING) {
        switch (ulp_command) {
        case CMD_GEN_REQUEST64_CR:
            if (iocb->context_un.ndlp == ndlp)
            if (iocb->ndlp == ndlp)
                return 1;
            fallthrough;
        case CMD_ELS_REQUEST64_CR:
@@ -5163,7 +5129,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                return 1;
            fallthrough;
        case CMD_XMIT_ELS_RSP64_CX:
            if (iocb->context1 == (uint8_t *) ndlp)
            if (iocb->ndlp == ndlp)
                return 1;
        }
    } else if (pring->ringno == LPFC_FCP_RING) {
@@ -5273,7 +5239,6 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    if (!ndlp)
        return;
    lpfc_issue_els_logo(vport, ndlp, 0);
    mempool_free(pmb, phba->mbox_mem_pool);

    /* Check to see if there are any deferred events to process */
    if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
@@ -5300,6 +5265,13 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        ndlp->nlp_flag &= ~NLP_UNREG_INP;
        spin_unlock_irq(&ndlp->lock);
    }

    /* The node has an outstanding reference for the unreg. Now
     * that the LOGO action and cleanup are finished, release
     * resources.
     */
    lpfc_nlp_put(ndlp);
    mempool_free(pmb, phba->mbox_mem_pool);
}

/*
@@ -5569,7 +5541,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
    struct lpfc_hba *phba = vport->phba;
    LPFC_MBOXQ_t *mb, *nextmb;
    struct lpfc_dmabuf *mp;

    /* Cleanup node for NPort <nlp_DID> */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5607,16 +5578,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
            !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
            (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
            mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
            if (mp) {
                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
            }
            list_del(&mb->list);
            mempool_free(mb, phba->mbox_mem_pool);
            /* We shall not invoke the lpfc_nlp_put to decrement
             * the ndlp reference count as we are in the process
             * of lpfc_nlp_release.
            lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);

            /* Don't invoke lpfc_nlp_put. The driver is in
             * lpfc_nlp_release context.
             */
        }
    }
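
Note the MBOX_THD_LOCKED argument in the hunk above: lpfc_cleanup_node() walks phba->sli.mboxq with the hbalock already held, so the helper must use __lpfc_mbuf_free() rather than the variant that takes the lock itself. A condensed sketch of that pattern (not verbatim driver code):

spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
    if (mb->u.mb.mbxCommand != MBX_REG_LOGIN64 ||
        ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp)
        continue;
    list_del(&mb->list);
    /* hbalock is held, so the locked flavor is required */
    lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
}
spin_unlock_irq(&phba->hbalock);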

@@ -6098,7 +6064,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
     */
    spin_lock_irq(&phba->hbalock);
    list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
        if (iocb->context1 != ndlp)
        if (iocb->ndlp != ndlp)
            continue;

        ulp_command = get_job_cmnd(phba, iocb);
@@ -6112,7 +6078,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)

    /* Next check the txcmplq */
    list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
        if (iocb->context1 != ndlp)
        if (iocb->ndlp != ndlp)
            continue;

        ulp_command = get_job_cmnd(phba, iocb);
@@ -6390,8 +6356,9 @@ restart_disc:
            lpfc_printf_vlog(vport, KERN_ERR,
                     LOG_TRACE_EVENT,
                     "0231 RSCN timeout Data: x%x "
                     "x%x\n",
                     vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
                     "x%x x%x x%x\n",
                     vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
                     vport->port_state, vport->gidft_inp);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);
@@ -6461,11 +6428,9 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
    struct lpfc_vport *vport = pmb->vport;

    pmb->ctx_buf = NULL;
    pmb->ctx_ndlp = NULL;

    if (phba->sli_rev < LPFC_SLI_REV4)
@@ -6496,10 +6461,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
     * function.
     */
    lpfc_nlp_put(ndlp);
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);

    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
    return;
}

@@ -97,6 +97,18 @@ union CtCommandResponse
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_NVME_DISC 0x4

enum rft_word0 {
    RFT_FCP_REG = (0x1 << 8),
};

enum rft_word1 {
    RFT_NVME_REG = (0x1 << 8),
};

enum rft_word3 {
    RFT_APP_SERV_REG = (0x1 << 0),
};
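
With these enums, the RFT_ID payload moves from endian-dependent bitfields to fixed big-endian words (see the struct rft change below), with one enum per word naming the defined bits. The lpfc_ct.c side of the change is not shown in these hunks, but a caller would presumably set the registration bits along these lines (hypothetical sketch):

static void rft_set_registrations(struct lpfc_sli_ct_request *ctreq,
                                  bool want_nvme)
{
    /* Whole-word big-endian stores replace the old bitfield pokes */
    ctreq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
    if (want_nvme)
        ctreq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
}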

struct lpfc_sli_ct_request {
    /* Structure is in Big Endian format */
    union CtRevisionId RevisionId;
@@ -131,25 +143,13 @@ struct lpfc_sli_ct_request {
            uint8_t Fc4Type;
        } gid_ff;
        struct rft {
            uint32_t PortId; /* For RFT_ID requests */
            __be32 port_id; /* For RFT_ID requests */

#ifdef __BIG_ENDIAN_BITFIELD
            uint32_t rsvd0:16;
            uint32_t rsvd1:7;
            uint32_t fcpReg:1; /* Type 8 */
            uint32_t rsvd2:2;
            uint32_t ipReg:1; /* Type 5 */
            uint32_t rsvd3:5;
#else /* __LITTLE_ENDIAN_BITFIELD */
            uint32_t rsvd0:16;
            uint32_t fcpReg:1; /* Type 8 */
            uint32_t rsvd1:7;
            uint32_t rsvd3:5;
            uint32_t ipReg:1; /* Type 5 */
            uint32_t rsvd2:2;
#endif

            uint32_t rsvd[7];
            __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */
            __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */
            __be32 word2;
            __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */
            __be32 word[4];
        } rft;
        struct rnn {
            uint32_t PortId; /* For RNN_ID requests */
@@ -511,8 +511,6 @@ struct class_parms {
    uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};

#define FAPWWN_KEY_VENDOR 0x42524344 /*valid vendor version fawwpn key*/

struct serv_parm { /* Structure is in Big Endian format */
    struct csp cmn;
    struct lpfc_name portName;
@@ -2650,19 +2648,26 @@ typedef struct {
} READ_SPARM_VAR;

/* Structure for MB Command READ_STATUS (14) */
enum read_status_word1 {
    RD_ST_CC = 0x01,
    RD_ST_XKB = 0x80,
};

enum read_status_word17 {
    RD_ST_XMIT_XKB_MASK = 0x3fffff,
};

enum read_status_word18 {
    RD_ST_RCV_XKB_MASK = 0x3fffff,
};

typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
    uint32_t rsvd1:31;
    uint32_t clrCounters:1;
    uint16_t activeXriCnt;
    uint16_t activeRpiCnt;
#else /* __LITTLE_ENDIAN_BITFIELD */
    uint32_t clrCounters:1;
    uint32_t rsvd1:31;
    uint16_t activeRpiCnt;
    uint16_t activeXriCnt;
#endif
    u8 clear_counters; /* rsvd 7:1, cc 0 */
    u8 rsvd5;
    u8 rsvd6;
    u8 xkb; /* xkb 7, rsvd 6:0 */

    u32 rsvd8;

    uint32_t xmitByteCnt;
    uint32_t rcvByteCnt;
@@ -2674,6 +2679,14 @@ typedef struct {
    uint32_t totalRespExchanges;
    uint32_t rcvPbsyCnt;
    uint32_t rcvFbsyCnt;

    u32 drop_frame_no_rq;
    u32 empty_rq;
    u32 drop_frame_no_xri;
    u32 empty_xri;

    u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */
    u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */
} READ_STATUS_VAR;
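
The reworked READ_STATUS layout reports transfer counts in KB units when the xkb flag is set, with the 22-bit transmit/receive counts masked out of words 17 and 18. A decoding sketch under that reading of the field comments (the helper name is hypothetical):

static u32 read_status_xmit_kb(const READ_STATUS_VAR *rs)
{
    /* xkb bit 7 set means the counters are expressed in KB */
    if (!(rs->xkb & RD_ST_XKB))
        return 0;
    return rs->xmit_xkb & RD_ST_XMIT_XKB_MASK;  /* bits 21:0 */
}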

/* Structure for MB Command READ_RPI (15) */

@@ -2893,6 +2893,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
#define lpfc_mbx_rd_conf_fawwpn_SHIFT 30
#define lpfc_mbx_rd_conf_fawwpn_MASK 0x00000001
#define lpfc_mbx_rd_conf_fawwpn_WORD word1
#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */
#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
#define lpfc_mbx_rd_conf_wcs_WORD word1
@@ -4473,12 +4476,8 @@ struct wqe_common {
#define wqe_cmd_type_MASK 0x0000000f
#define wqe_cmd_type_WORD word11
#define wqe_els_id_SHIFT 4
#define wqe_els_id_MASK 0x00000003
#define wqe_els_id_MASK 0x00000007
#define wqe_els_id_WORD word11
#define LPFC_ELS_ID_FLOGI 3
#define LPFC_ELS_ID_FDISC 2
#define LPFC_ELS_ID_LOGO 1
#define LPFC_ELS_ID_DEFAULT 0
#define wqe_irsp_SHIFT 4
#define wqe_irsp_MASK 0x00000001
#define wqe_irsp_WORD word11
@@ -4525,6 +4524,14 @@ struct lpfc_wqe_generic{
    uint32_t payload[4];
};

enum els_request64_wqe_word11 {
    LPFC_ELS_ID_DEFAULT,
    LPFC_ELS_ID_LOGO,
    LPFC_ELS_ID_FDISC,
    LPFC_ELS_ID_FLOGI,
    LPFC_ELS_ID_PLOGI,
};
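
The two changes above work together: wqe_els_id_MASK widens from two bits to three, and the ID values move from #defines into an enum whose new last member, LPFC_ELS_ID_PLOGI (value 4), would not fit under the old mask. A caller sets the field through the driver's usual bit-field accessor, e.g.:

    bf_set(wqe_els_id, &wqe->els_req.wqe_com, LPFC_ELS_ID_PLOGI);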

struct els_request64_wqe {
    struct ulp_bde64 bde;
    uint32_t payload_len;

@@ -350,8 +350,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
    struct lpfc_hba *phba = vport->phba;

    /*
     * If the name is empty or there exists a soft name
@@ -370,18 +369,29 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
     */
    if (vport->fc_portname.u.wwn[0] != 0 &&
        memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
           sizeof(struct lpfc_name)))
           sizeof(struct lpfc_name))) {
        vport->vport_flag |= FAWWPN_PARAM_CHG;

    if (vport->fc_portname.u.wwn[0] == 0 ||
        (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
        vport->vport_flag & FAWWPN_SET) {
        if (phba->sli_rev == LPFC_SLI_REV4 &&
            vport->port_type == LPFC_PHYSICAL_PORT &&
            phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
            lpfc_printf_log(phba, KERN_INFO,
                    LOG_SLI | LOG_DISCOVERY | LOG_ELS,
                    "2701 FA-PWWN change WWPN from %llx to "
                    "%llx: vflag x%x fawwpn_flag x%x\n",
                    wwn_to_u64(vport->fc_portname.u.wwn),
                    wwn_to_u64
                       (vport->fc_sparam.portName.u.wwn),
                    vport->vport_flag,
                    phba->sli4_hba.fawwpn_flag);
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        vport->vport_flag &= ~FAWWPN_SET;
        if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
            vport->vport_flag |= FAWWPN_SET;
        }
    }

    if (vport->fc_portname.u.wwn[0] == 0)
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
    else
        memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
               sizeof(struct lpfc_name));
@@ -443,15 +453,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
            "READ_SPARM mbxStatus x%x\n",
            mb->mbxCommand, mb->mbxStatus);
        phba->link_state = LPFC_HBA_ERROR;
        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        mempool_free(pmb, phba->mbox_mem_pool);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return -EIO;
    }

    mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

    /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
     * longer needed. Prevent unintended ctx_buf access as the mbox is
     * reused.
     */
    memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
@@ -686,8 +697,14 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba)
        return rc;
    }
    mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;

    /* Are we forcing MI off via module parameter? */
    if (phba->cfg_enable_mi)
        phba->sli4_hba.pc_sli4_params.mi_ver =
            bf_get(cfg_mi_ver, mbx_sli4_parameters);
    else
        phba->sli4_hba.pc_sli4_params.mi_ver = 0;

    phba->sli4_hba.pc_sli4_params.cmf =
            bf_get(cfg_cmf, mbx_sli4_parameters);
    phba->sli4_hba.pc_sli4_params.pls =
@@ -2176,7 +2193,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
    struct lpfc_sli *psli = &phba->sli;
    LPFC_MBOXQ_t *pmb;
    volatile uint32_t control;
    struct lpfc_dmabuf *mp;
    int rc = 0;

    pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2185,23 +2201,17 @@ lpfc_handle_latt(struct lpfc_hba *phba)
        goto lpfc_handle_latt_err_exit;
    }

    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (!mp) {
    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        rc = 2;
        goto lpfc_handle_latt_free_pmb;
    }

    mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp->virt) {
        rc = 3;
        goto lpfc_handle_latt_free_mp;
        mempool_free(pmb, phba->mbox_mem_pool);
        goto lpfc_handle_latt_err_exit;
    }

    /* Cleanup any outstanding ELS commands */
    lpfc_els_flush_all_cmd(phba);

    psli->slistat.link_event++;
    lpfc_read_topology(phba, pmb, mp);
    lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
    pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
    pmb->vport = vport;
    /* Block ELS IOCBs until we have processed this mbox command */
@@ -2222,11 +2232,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)

lpfc_handle_latt_free_mbuf:
    phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
    kfree(mp);
lpfc_handle_latt_free_pmb:
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
    /* Enable Link attention interrupts */
    spin_lock_irq(&phba->hbalock);
@@ -4317,9 +4323,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)

    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
            "6074 Current allocated XRI sgl count:%d, "
            "maximum XRI count:%d\n",
            "maximum XRI count:%d els_xri_cnt:%d\n\n",
            phba->sli4_hba.io_xri_cnt,
            phba->sli4_hba.io_xri_max);
            phba->sli4_hba.io_xri_max,
            els_xri_cnt);

    cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

@@ -4458,12 +4465,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
        }
        pwqeq->sli4_lxritag = lxri;
        pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        pwqeq->context1 = lpfc_ncmd;

        /* Initialize local short-hand pointers. */
        lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
        lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
        lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
        lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
        spin_lock_init(&lpfc_ncmd->buf_lock);

        /* add the nvme buffer to a post list */
@@ -4472,7 +4478,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
    }
    lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
            "6114 Allocate %d out of %d requested new NVME "
            "buffers\n", bcnt, num_to_alloc);
            "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
            sizeof(*lpfc_ncmd));

    /* post the list of nvme buffer sgls to port if available */
    if (!list_empty(&post_nblist))
@@ -5307,7 +5315,6 @@ static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
             struct lpfc_acqe_link *acqe_link)
{
    struct lpfc_dmabuf *mp;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    struct lpfc_mbx_read_top *la;
@@ -5324,18 +5331,13 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
                "0395 The mboxq allocation failed\n");
        return;
    }
    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (!mp) {

    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0396 The lpfc_dmabuf allocation failed\n");
                "0396 mailbox allocation failed\n");
        goto out_free_pmb;
    }
    mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp->virt) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0397 The mbuf allocation failed\n");
        goto out_free_dmabuf;
    }

    /* Cleanup any outstanding ELS commands */
    lpfc_els_flush_all_cmd(phba);
@@ -5347,7 +5349,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
    phba->sli.slistat.link_event++;

    /* Create lpfc_handle_latt mailbox command from link ACQE */
    lpfc_read_topology(phba, pmb, mp);
    lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
    pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
    pmb->vport = phba->pport;

@@ -5385,10 +5387,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
     */
    if (!(phba->hba_flag & HBA_FCOE_MODE)) {
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            goto out_free_dmabuf;
        }
        if (rc == MBX_NOT_FINISHED)
            goto out_free_pmb;
        return;
    }
    /*
@@ -5423,10 +5423,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,

    return;

out_free_dmabuf:
    kfree(mp);
out_free_pmb:
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
@@ -5533,7 +5531,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
    struct tm broken;
    struct timespec64 cur_time;
    u32 cnt;
    u16 value;
    u32 value;

    /* Make sure we have a congestion info buffer */
    if (!phba->cgn_i)
@@ -5866,20 +5864,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)

        /* Use the frequency found in the last rcv'ed FPIN */
        value = phba->cgn_fpin_frequency;
        if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
            cp->cgn_warn_freq = cpu_to_le16(value);
        if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
            cp->cgn_alarm_freq = cpu_to_le16(value);

        /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
         * are received by the HBA
         */
        value = phba->cgn_sig_freq;

        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
            cp->cgn_warn_freq = cpu_to_le16(value);
        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
            cp->cgn_alarm_freq = cpu_to_le16(value);

        lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
@@ -6237,7 +6222,6 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
    struct lpfc_dmabuf *mp;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *mb;
    struct lpfc_mbx_read_top *la;
@@ -6297,18 +6281,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
                "2897 The mboxq allocation failed\n");
        return;
    }
    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (!mp) {
    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "2898 The lpfc_dmabuf allocation failed\n");
                "2898 The mboxq prep failed\n");
        goto out_free_pmb;
    }
    mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp->virt) {
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "2899 The mbuf allocation failed\n");
        goto out_free_dmabuf;
    }

    /* Cleanup any outstanding ELS commands */
    lpfc_els_flush_all_cmd(phba);
@@ -6320,7 +6298,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
    phba->sli.slistat.link_event++;

    /* Create lpfc_handle_latt mailbox command from link ACQE */
    lpfc_read_topology(phba, pmb, mp);
    lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
    pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
    pmb->vport = phba->pport;

@@ -6364,16 +6342,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
    }

    rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
    if (rc == MBX_NOT_FINISHED) {
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        goto out_free_dmabuf;
    }
    if (rc == MBX_NOT_FINISHED)
        goto out_free_pmb;
    return;

out_free_dmabuf:
    kfree(mp);
out_free_pmb:
    mempool_free(pmb, phba->mbox_mem_pool);
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
@@ -6565,12 +6539,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
    case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
        /* Misconfigured WWN. Reports that the SLI Port is configured
         * to use FA-WWN, but the attached device doesn't support it.
         * No driver action is required.
         * Event Data1 - N.A, Event Data2 - N.A
         * This event only happens on the physical port.
         */
        lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
                 "2699 Misconfigured FA-WWN - Attached device does "
                 "not support FA-WWN\n");
        lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
                 "2699 Misconfigured FA-PWWN - Attached device "
                 "does not support FA-PWWN\n");
        phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
        memset(phba->pport->fc_portname.u.wwn, 0,
               sizeof(struct lpfc_name));
        break;
    case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
        /* EEPROM failure. No driver action is required */
@@ -6595,9 +6572,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
        /* Alarm overrides warning, so check that first */
        if (cgn_signal->alarm_cnt) {
            if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
                /* Keep track of alarm cnt for cgn_info */
                atomic_add(cgn_signal->alarm_cnt,
                       &phba->cgn_fabric_alarm_cnt);
                /* Keep track of alarm cnt for CMF_SYNC_WQE */
                atomic_add(cgn_signal->alarm_cnt,
                       &phba->cgn_sync_alarm_cnt);
@@ -6606,8 +6580,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
            /* signal action needs to be taken */
            if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
                phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
                /* Keep track of warning cnt for cgn_info */
                atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
                /* Keep track of warning cnt for CMF_SYNC_WQE */
                atomic_add(cnt, &phba->cgn_sync_warn_cnt);
            }
@@ -8027,6 +7999,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
    rc = lpfc_sli4_read_config(phba);
    if (unlikely(rc))
        goto out_free_bsmbx;

    if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
        /* Right now the link is down, if FA-PWWN is configured the
         * firmware will try FLOGI before the driver gets a link up.
         * If it fails, the driver should get a MISCONFIGURED async
         * event which will clear this flag. The only notification
         * the driver gets is if it fails, if it succeeds there is no
         * notification given. Assume success.
         */
        phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
    }

    rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
    if (unlikely(rc))
        goto out_free_bsmbx;
@@ -9000,6 +8984,36 @@ lpfc_hba_free(struct lpfc_hba *phba)
    return;
}

/**
 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine will set up the initial FDMI attribute masks for
 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
 * to get these attributes first before falling back; the attribute
 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
 **/
void
lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
{
    struct lpfc_hba *phba = vport->phba;

    vport->load_flag |= FC_ALLOW_FDMI;
    if (phba->cfg_enable_SmartSAN ||
        phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
        /* Setup appropriate attribute masks */
        vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
        if (phba->cfg_enable_SmartSAN)
            vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
        else
            vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
    }

    lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
            "6077 Setup FDMI mask: hba x%x port x%x\n",
            vport->fdmi_hba_mask, vport->fdmi_port_mask);
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
@@ -9043,21 +9057,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
    /* Put reference to SCSI host to driver's device private data */
    pci_set_drvdata(phba->pcidev, shost);

    lpfc_setup_fdmi_mask(vport);

    /*
     * At this point we are fully registered with PSA. In addition,
     * any initial discovery should be completed.
     */
    vport->load_flag |= FC_ALLOW_FDMI;
    if (phba->cfg_enable_SmartSAN ||
        (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

        /* Setup appropriate attribute masks */
        vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
        if (phba->cfg_enable_SmartSAN)
            vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
        else
            vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
    }
    return 0;
}

@@ -9830,7 +9835,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
    struct lpfc_rsrc_desc_fcfcoe *desc;
    char *pdesc_0;
    uint16_t forced_link_speed;
    uint32_t if_type, qmin;
    uint32_t if_type, qmin, fawwpn;
    int length, i, rc = 0, rc2;

    pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -9872,10 +9877,23 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
            phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
        }

        fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);

        if (fawwpn) {
            lpfc_printf_log(phba, KERN_INFO,
                    LOG_INIT | LOG_DISCOVERY,
                    "2702 READ_CONFIG: FA-PWWN is "
                    "configured on\n");
            phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
        } else {
            phba->sli4_hba.fawwpn_flag = 0;
        }

        phba->sli4_hba.conf_trunk =
            bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
        phba->sli4_hba.extents_in_use =
            bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);

        phba->sli4_hba.max_cfg_param.max_xri =
            bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
        /* Reduce resource usage in kdump environment */
@@ -14832,9 +14850,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
    /* Check if there are static vports to be created. */
    lpfc_create_static_vport(phba);

    /* Enable RAS FW log support */
    lpfc_sli4_ras_setup(phba);

    timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
    cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

@@ -15700,34 +15715,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
    unsigned int temp_idx;
    int i;
    int j = 0;
    unsigned long rem_nsec, iflags;
    bool log_verbose = false;
    struct lpfc_vport *port_iterator;

    /* Don't dump messages if we explicitly set log_verbose for the
     * physical port or any vport.
     */
    if (phba->cfg_log_verbose)
        return;

    spin_lock_irqsave(&phba->port_list_lock, iflags);
    list_for_each_entry(port_iterator, &phba->port_list, listentry) {
        if (port_iterator->load_flag & FC_UNLOADING)
            continue;
        if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
            if (port_iterator->cfg_log_verbose)
                log_verbose = true;

            scsi_host_put(lpfc_shost_from_vport(port_iterator));

            if (log_verbose) {
                spin_unlock_irqrestore(&phba->port_list_lock,
                               iflags);
                return;
            }
        }
    }
    spin_unlock_irqrestore(&phba->port_list_lock, iflags);
    unsigned long rem_nsec;

    if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
        return;

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -73,7 +73,7 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
    { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
        if ((mask) & LOG_TRACE_EVENT) \
        if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
            lpfc_dmp_dbg((vport)->phba); \
        dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
               fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
@@ -89,11 +89,11 @@ do { \
            (phba)->pport->cfg_log_verbose : \
            (phba)->cfg_log_verbose; \
    if (((mask) & log_verbose) || (level[1] <= '3')) { \
        if ((mask) & LOG_TRACE_EVENT) \
        if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
            lpfc_dmp_dbg(phba); \
        dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
               fmt, phba->brd_no, ##arg); \
    } else if (!(phba)->cfg_log_verbose)\
    } else if (!log_verbose)\
        lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
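
These macro changes replace the per-vport scan deleted from lpfc_dmp_dbg() in the previous file: the verbose-logging check now happens at the logging site itself, so a LOG_TRACE_EVENT message only dumps the debug ring when verbose logging is off. An illustrative invocation (the message id here is hypothetical):

    /* Dumps the trace ring only if cfg_log_verbose is not set */
    lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
            "9999 example trace-event message\n");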

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -43,6 +43,80 @@
#include "lpfc_crtn.h"
#include "lpfc_compat.h"

/**
 * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory.
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * A mailbox command consists of the pool memory for the command, @mbox, and
 * one or more DMA buffers for the data transfer. This routine provides
 * a standard framework for allocating the dma buffer and assigning it to
 * @mbox. Callers should clean up the mbox with a call to
 * lpfc_mbox_rsrc_cleanup.
 *
 * The lpfc_mbuf_alloc routine acquires the hbalock so the caller is
 * responsible for ensuring the hbalock is released. Also note that the
 * driver design is a single dmabuf/mbuf per mbox in the ctx_buf.
 *
 **/
int
lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
    struct lpfc_dmabuf *mp;

    mp = kmalloc(sizeof(*mp), GFP_KERNEL);
    if (!mp)
        return -ENOMEM;

    mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp->virt) {
        kfree(mp);
        return -ENOMEM;
    }

    memset(mp->virt, 0, LPFC_BPL_SIZE);

    /* Initialization only. Driver does not use a list of dmabufs. */
    INIT_LIST_HEAD(&mp->list);
    mbox->ctx_buf = mp;
    return 0;
}

/**
 * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory.
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 * @locked: value that indicates if the hbalock is held (1) or not (0).
 *
 * A mailbox command consists of the pool memory for the command, @mbox, and
 * possibly a DMA buffer for the data transfer. This routine provides
 * a standard framework for releasing any dma buffers and freeing all
 * memory resources in it as well as releasing the @mbox back to the @phba pool.
 * Callers should use this routine for cleanup for all mailboxes prepped with
 * lpfc_mbox_rsrc_prep.
 *
 **/
void
lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
               enum lpfc_mbox_ctx locked)
{
    struct lpfc_dmabuf *mp;

    mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
    mbox->ctx_buf = NULL;

    /* Release the generic BPL buffer memory. */
    if (mp) {
        if (locked == MBOX_THD_LOCKED)
            __lpfc_mbuf_free(phba, mp->virt, mp->phys);
        else
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
    }

    mempool_free(mbox, phba->mbox_mem_pool);
}
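
A minimal caller sketch for the new pair, assuming the typical flow seen at the converted call sites in this series (the wrapper function itself is illustrative):

static int example_mbox_user(struct lpfc_hba *phba)
{
    LPFC_MBOXQ_t *mbox;
    int rc;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;

    rc = lpfc_mbox_rsrc_prep(phba, mbox);   /* allocates the ctx_buf dmabuf */
    if (rc) {
        mempool_free(mbox, phba->mbox_mem_pool);
        return rc;
    }

    /* ... build the command in mbox->u.mb and issue it ... */

    lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
    return 0;
}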

/**
 * lpfc_dump_static_vport - Dump HBA's static vport information.
 * @phba: pointer to lpfc hba data structure.
@@ -61,6 +135,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
{
    MAILBOX_t *mb;
    struct lpfc_dmabuf *mp;
    int rc;

    mb = &pmb->u.mb;

@@ -79,22 +154,15 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
        return 0;
    }

    /* For SLI4 HBAs driver need to allocate memory */
    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

    if (!mp || !mp->virt) {
        kfree(mp);
    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "2605 lpfc_dump_static_vport: memory"
                " allocation failed\n");
                "2605 %s: memory allocation failed\n",
                __func__);
        return 1;
    }
    memset(mp->virt, 0, LPFC_BPL_SIZE);
    INIT_LIST_HEAD(&mp->list);
    /* save address for completion */
    pmb->ctx_buf = (uint8_t *)mp;

    mp = pmb->ctx_buf;
    mb->un.varWords[3] = putPaddrLow(mp->phys);
    mb->un.varWords[4] = putPaddrHigh(mp->phys);
    mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -606,26 +674,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
    struct lpfc_dmabuf *mp;
    MAILBOX_t *mb;
    int rc;

    mb = &pmb->u.mb;
    memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

    mb->mbxOwner = OWN_HOST;

    /* Get a buffer to hold the HBAs Service Parameters */

    mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp || !mp->virt) {
        kfree(mp);
        mb->mbxCommand = MBX_READ_SPARM64;
        /* READ_SPARAM: no buffers */
    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                "0301 READ_SPARAM: no buffers\n");
        return (1);
        return 1;
    }
    INIT_LIST_HEAD(&mp->list);

    mp = pmb->ctx_buf;
    mb = &pmb->u.mb;
    mb->mbxOwner = OWN_HOST;
    mb->mbxCommand = MBX_READ_SPARM64;
    mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
    mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -633,9 +696,6 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
    if (phba->sli_rev >= LPFC_SLI_REV3)
        mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];

    /* save address for completion */
    pmb->ctx_buf = mp;

    return (0);
}

@@ -756,6 +816,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
    MAILBOX_t *mb = &pmb->u.mb;
    uint8_t *sparam;
    struct lpfc_dmabuf *mp;
    int rc;

    memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

@@ -766,12 +827,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
    mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
    mb->un.varRegLogin.did = did;
    mb->mbxOwner = OWN_HOST;

    /* Get a buffer to hold NPorts Service Parameters */
    mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp || !mp->virt) {
        kfree(mp);
    rc = lpfc_mbox_rsrc_prep(phba, pmb);
    if (rc) {
        mb->mbxCommand = MBX_REG_LOGIN64;
        /* REG_LOGIN: no buffers */
        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
@@ -779,15 +838,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
                "rpi x%x\n", vpi, did, rpi);
        return 1;
    }
    INIT_LIST_HEAD(&mp->list);
    sparam = mp->virt;

    /* Copy param's into a new buffer */
    mp = pmb->ctx_buf;
    sparam = mp->virt;
    memcpy(sparam, param, sizeof (struct serv_parm));

    /* save address for completion */
    pmb->ctx_buf = (uint8_t *)mp;

    /* Finish initializing the mailbox. */
    mb->mbxCommand = MBX_REG_LOGIN64;
    mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
    mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -1723,7 +1780,9 @@ lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 *
 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
 * This routine cleans up and releases an SLI4 mailbox command that was
 * configured using lpfc_sli4_config. It accounts for the embedded and
 * non-embedded config types.
 **/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
@@ -2277,33 +2336,24 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
    struct lpfc_dmabuf *mp = NULL;
    MAILBOX_t *mb;
    int rc;

    memset(mbox, 0, sizeof(*mbox));
    mb = &mbox->u.mb;

    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

    if (!mp || !mp->virt) {
        kfree(mp);
        /* dump config region 23 failed to allocate memory */
    rc = lpfc_mbox_rsrc_prep(phba, mbox);
    if (rc) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                "2569 lpfc dump config region 23: memory"
                " allocation failed\n");
                "2569 %s: memory allocation failed\n",
                __func__);
        return 1;
    }

    memset(mp->virt, 0, LPFC_BPL_SIZE);
    INIT_LIST_HEAD(&mp->list);

    /* save address for completion */
    mbox->ctx_buf = (uint8_t *)mp;

    mb->mbxCommand = MBX_DUMP_MEMORY;
    mb->un.varDmp.type = DMP_NV_PARAMS;
    mb->un.varDmp.region_id = DMP_REGION_23;
    mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
    mp = mbox->ctx_buf;
    mb->un.varWords[3] = putPaddrLow(mp->phys);
    mb->un.varWords[4] = putPaddrHigh(mp->phys);
    return 0;
@@ -2326,7 +2376,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
    rc = SUCCESS;

mbx_failed:
    lpfc_sli4_mbox_cmd_free(phba, mboxq);
    lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
    rdp_context->cmpl(phba, rdp_context, rc);
}

@@ -2338,30 +2388,25 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
        (struct lpfc_rdp_context *)(mbox->ctx_ndlp);

    if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
        goto error_mbuf_free;
        goto error_mbox_free;

    lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
                 DMP_SFF_PAGE_A2_SIZE);

    /* We don't need dma buffer for link stat. */
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);

    memset(mbox, 0, sizeof(*mbox));
    lpfc_read_lnk_stat(phba, mbox);
    mbox->vport = rdp_context->ndlp->vport;

    /* Save the dma buffer for cleanup in the final completion. */
    mbox->ctx_buf = mp;
    mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
    mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
    if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
        goto error_cmd_free;
        goto error_mbox_free;

    return;

error_mbuf_free:
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
error_cmd_free:
    lpfc_sli4_mbox_cmd_free(phba, mbox);
error_mbox_free:
    lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
    rdp_context->cmpl(phba, rdp_context, FAILURE);
}

@@ -2409,9 +2454,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
    return;

error:
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    lpfc_sli4_mbox_cmd_free(phba, mbox);
    lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
    rdp_context->cmpl(phba, rdp_context, FAILURE);
}

@@ -2427,27 +2470,19 @@ error:
int
lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
    int rc;
    struct lpfc_dmabuf *mp = NULL;

    memset(mbox, 0, sizeof(*mbox));

    mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
    if (mp)
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
    if (!mp || !mp->virt) {
        kfree(mp);
    rc = lpfc_mbox_rsrc_prep(phba, mbox);
    if (rc) {
        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                "3569 dump type 3 page 0xA0 allocation failed\n");
        return 1;
    }

    memset(mp->virt, 0, LPFC_BPL_SIZE);
    INIT_LIST_HEAD(&mp->list);

    bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
    /* save address for completion */
    mbox->ctx_buf = mp;

    bf_set(lpfc_mbx_memory_dump_type3_type,
           &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
    bf_set(lpfc_mbx_memory_dump_type3_link,
@@ -2456,6 +2491,8 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
           &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
    bf_set(lpfc_mbx_memory_dump_type3_length,
           &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);

    mp = mbox->ctx_buf;
    mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
    mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);

@@ -173,9 +173,9 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
    void *ptr = NULL;
    u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

    pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    pcmd = cmdiocb->cmd_dmabuf;

    /* For lpfc_els_abort, context2 could be zero'ed to delay
    /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
     * freeing associated memory till after ABTS completes.
     */
    if (pcmd) {
@@ -327,7 +327,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_dmabuf *pcmd;
    struct lpfc_dmabuf *mp;
    uint64_t nlp_portwwn = 0;
    uint32_t *lp;
    union lpfc_wqe128 *wqe;
@@ -343,7 +342,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    u32 remote_did;

    memset(&stat, 0, sizeof (struct ls_rjt));
    pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    pcmd = cmdiocb->cmd_dmabuf;
    lp = (uint32_t *) pcmd->virt;
    sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
    if (wwn_to_u64(sp->portName.u.wwn) == 0) {
@@ -514,6 +513,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_config_link(phba, link_mbox);
        link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        link_mbox->vport = vport;

        /* The default completion handling for CONFIG_LINK
         * does not require the ndlp so no reference is needed.
         */
        link_mbox->ctx_ndlp = ndlp;

        rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
@@ -592,12 +595,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         * a default RPI.
         */
        if (phba->sli_rev == LPFC_SLI_REV4) {
            mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf;
            if (mp) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
            }
            mempool_free(login_mbox, phba->mbox_mem_pool);
            lpfc_mbox_rsrc_cleanup(phba, login_mbox,
                           MBOX_THD_UNLOCKED);
            login_mbox = NULL;
        } else {
            /* In order to preserve RPIs, we want to cleanup
@@ -615,8 +614,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
        rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
                     ndlp, login_mbox);
        if (rc)
            mempool_free(login_mbox, phba->mbox_mem_pool);
        if (rc && login_mbox)
            lpfc_mbox_rsrc_cleanup(phba, login_mbox,
                           MBOX_THD_UNLOCKED);
        return 1;
    }

@@ -637,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     */
    login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
    login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
    if (!login_mbox->ctx_ndlp)
        goto out;

    login_mbox->context3 = save_iocb; /* For PLOGI ACC */

    spin_lock_irq(&ndlp->lock);
@@ -645,8 +648,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

    /* Start the ball rolling by issuing REG_LOGIN here */
    rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
    if (rc == MBX_NOT_FINISHED)
    if (rc == MBX_NOT_FINISHED) {
        lpfc_nlp_put(ndlp);
        goto out;
    }
    lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);

    return 1;
@@ -710,7 +715,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    uint32_t *lp;
    uint32_t cmd;

    pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    pcmd = cmdiocb->cmd_dmabuf;
    lp = (uint32_t *) pcmd->virt;

    cmd = *lp++;
@@ -918,7 +923,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
    uint32_t *payload;
    uint32_t cmd;

    payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
    payload = cmdiocb->cmd_dmabuf->virt;
    cmd = *payload;
    if (vport->phba->nvmet_support) {
        /* Must be a NVME PRLI */
@@ -955,9 +960,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    struct fc_rport *rport = ndlp->rport;
    u32 roles;

    pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    lp = (uint32_t *) pcmd->virt;
    npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
    pcmd = cmdiocb->cmd_dmabuf;
    lp = (uint32_t *)pcmd->virt;
    npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));

    if ((npr->prliType == PRLI_FCP_TYPE) ||
        (npr->prliType == PRLI_NVME_TYPE)) {
@@ -1103,9 +1108,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);

        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED)
        if (rc == MBX_NOT_FINISHED) {
            lpfc_nlp_put(ndlp);
            mempool_free(pmb, phba->mbox_mem_pool);
        }
    }
}

static uint32_t
@@ -1218,7 +1225,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_iocbq *cmdiocb = arg;
    struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
    uint32_t *lp = (uint32_t *) pcmd->virt;
    struct serv_parm *sp = (struct serv_parm *) (lp + 1);
    struct ls_rjt stat;
@@ -1328,7 +1335,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_iocbq *cmdiocb, *rspiocb;
    struct lpfc_dmabuf *pcmd, *prsp, *mp;
    struct lpfc_dmabuf *pcmd, *prsp;
    uint32_t *lp;
    uint32_t vid, flag;
    struct serv_parm *sp;
@@ -1339,7 +1346,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
    u32 did;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -1351,7 +1358,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
    if (ulp_status)
        goto out;

    pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
    pcmd = cmdiocb->cmd_dmabuf;

    prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
    if (!prsp)
@@ -1495,11 +1502,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
             * command
             */
            lpfc_nlp_put(ndlp);
            mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
            mempool_free(mbox, phba->mbox_mem_pool);

            lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
            lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                     "0134 PLOGI: cannot issue reg_login "
                     "Data: x%x x%x x%x x%x\n",
@@ -1697,7 +1700,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
    u32 ulp_status;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -1850,7 +1853,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
    struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
    LPFC_MBOXQ_t *mb;
    LPFC_MBOXQ_t *nextmb;
    struct lpfc_dmabuf *mp;
    struct lpfc_nodelist *ns_ndlp;

    cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1870,16 +1872,11 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
    list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
        if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
            (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
            mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
            if (mp) {
                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
            }
            ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
            lpfc_nlp_put(ndlp);
            list_del(&mb->list);
            phba->sli.mboxq_cnt--;
            mempool_free(mb, phba->mbox_mem_pool);
            lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
        }
    }
    spin_unlock_irq(&phba->hbalock);
@@ -2152,7 +2149,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    u32 ulp_status;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -2772,7 +2769,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    u32 ulp_status;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -2791,7 +2788,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    u32 ulp_status;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -2827,7 +2824,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    u32 ulp_status;

    cmdiocb = (struct lpfc_iocbq *) arg;
    rspiocb = cmdiocb->context_un.rsp_iocb;
    rspiocb = cmdiocb->rsp_iocb;

    ulp_status = get_job_ulpstatus(phba, rspiocb);

@@ -319,8 +319,10 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
    struct lpfc_nodelist *ndlp;
    uint32_t status;

    pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
    ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
    pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
    ndlp = cmdwqe->ndlp;
    buf_ptr = cmdwqe->bpl_dmabuf;

    status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

    lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -330,16 +332,16 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
             pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
             cmdwqe->sli4_xritag, status,
             (wcqe->parameter & 0xffff),
             cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
             cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
             ndlp);

    lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
             cmdwqe->sli4_xritag, status, wcqe->parameter);

    if (cmdwqe->context3) {
        buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
    if (buf_ptr) {
        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
        kfree(buf_ptr);
        cmdwqe->context3 = NULL;
        cmdwqe->bpl_dmabuf = NULL;
    }
    if (pnvme_lsreq->done)
        pnvme_lsreq->done(pnvme_lsreq, status);
@@ -351,7 +353,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
             cmdwqe->sli4_xritag, status);
    if (ndlp) {
        lpfc_nlp_put(ndlp);
        cmdwqe->context1 = NULL;
        cmdwqe->ndlp = NULL;
    }
    lpfc_sli_release_iocbq(phba, cmdwqe);
}
|
||||
@ -407,19 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
|
||||
/* Initialize only 64 bytes */
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe));
|
||||
|
||||
genwqe->context3 = (uint8_t *)bmp;
|
||||
genwqe->bpl_dmabuf = bmp;
|
||||
genwqe->cmd_flag |= LPFC_IO_NVME_LS;
|
||||
|
||||
/* Save for completion so we can release these resources */
|
||||
genwqe->context1 = lpfc_nlp_get(ndlp);
|
||||
if (!genwqe->context1) {
|
||||
genwqe->ndlp = lpfc_nlp_get(ndlp);
|
||||
if (!genwqe->ndlp) {
|
||||
dev_warn(&phba->pcidev->dev,
|
||||
"Warning: Failed node ref, not sending LS_REQ\n");
|
||||
lpfc_sli_release_iocbq(phba, genwqe);
|
||||
return 1;
|
||||
}
|
||||
|
||||
genwqe->context2 = (uint8_t *)pnvme_lsreq;
|
||||
genwqe->context_un.nvme_lsreq = pnvme_lsreq;
|
||||
/* Fill in payload, bp points to frame payload */
|
||||
|
||||
if (!tmo)
|
||||
@ -730,7 +732,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock(&pring->ring_lock);
|
||||
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
|
||||
if (wqe->context2 == pnvme_lsreq) {
|
||||
if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
|
||||
wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
|
||||
foundit = true;
|
||||
break;
|
||||
@ -929,8 +931,7 @@ static void
|
||||
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
struct lpfc_iocbq *pwqeOut)
|
||||
{
|
||||
struct lpfc_io_buf *lpfc_ncmd =
|
||||
(struct lpfc_io_buf *)pwqeIn->context1;
|
||||
struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
|
||||
struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
|
||||
struct lpfc_vport *vport = pwqeIn->vport;
|
||||
struct nvmefc_fcp_req *nCmd;
|
||||
@ -1400,8 +1401,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
if ((nseg - 1) == i)
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
|
||||
physaddr = data_sg->dma_address;
|
||||
dma_len = data_sg->length;
|
||||
physaddr = sg_dma_address(data_sg);
|
||||
dma_len = sg_dma_len(data_sg);
|
||||
sgl->addr_lo = cpu_to_le32(
|
||||
putPaddrLow(physaddr));
|
||||
sgl->addr_hi = cpu_to_le32(
|
||||
@ -2356,6 +2357,11 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
|
||||
|
||||
spin_lock_irq(&ndlp->lock);
|
||||
|
||||
/* If an oldrport exists, so does the ndlp reference. If not
|
||||
* a new reference is needed because either the node has never
|
||||
* been registered or it's been unregistered and getting deleted.
|
||||
*/
|
||||
oldrport = lpfc_ndlp_get_nrport(ndlp);
|
||||
if (oldrport) {
|
||||
prev_ndlp = oldrport->ndlp;
|
||||
@ -2466,12 +2472,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
if (!nrport || !remoteport)
|
||||
goto rescan_exit;
|
||||
|
||||
/* Only rescan if we are an NVME target in the MAPPED state */
|
||||
/* Rescan an NVME target in MAPPED state with DISCOVERY role set */
|
||||
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
|
||||
ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
|
||||
nvme_fc_rescan_remoteport(remoteport);
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
|
||||
"6172 NVME rescanned DID x%06x "
|
||||
"port_state x%x\n",
|
||||
ndlp->nlp_DID, remoteport->port_state);
|
||||
@ -2717,7 +2723,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
struct lpfc_wcqe_complete wcqe;
|
||||
struct lpfc_wcqe_complete *wcqep = &wcqe;
|
||||
|
||||
lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
|
||||
lpfc_ncmd = pwqeIn->io_buf;
|
||||
if (!lpfc_ncmd) {
|
||||
lpfc_sli_release_iocbq(phba, pwqeIn);
|
||||
return;
|
||||
|
@@ -295,7 +295,7 @@ void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
@@ -317,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
status, result, axchg->oxid);

lpfc_nlp_put(cmdwqe->context1);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_nlp_put(cmdwqe->ndlp);
cmdwqe->context_un.axchg = NULL;
cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
ls_rsp->done(ls_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -728,7 +728,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
int id;
#endif

ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
ctxp->flag &= ~LPFC_NVME_IO_INP;

rsp = &ctxp->hdlrctx.fcp_req;
@@ -903,7 +903,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
/* Save numBdes for bpl2sgl */
nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
nvmewqeq->context3 = &dmabuf;
nvmewqeq->bpl_dmabuf = &dmabuf;
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
@@ -917,7 +917,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
*/

nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
nvmewqeq->context2 = axchg;
nvmewqeq->context_un.axchg = axchg;

lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
@@ -925,7 +925,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

/* clear to be sure there's no reference */
nvmewqeq->context3 = NULL;
nvmewqeq->bpl_dmabuf = NULL;

if (rc == WQE_SUCCESS) {
/*
@@ -942,7 +942,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,

rc = -ENXIO;

lpfc_nlp_put(nvmewqeq->context1);
lpfc_nlp_put(nvmewqeq->ndlp);

out_free_buf:
/* Give back resources */
@@ -1075,7 +1075,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
}

nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
nvmewqeq->context2 = ctxp;
nvmewqeq->context_un.axchg = ctxp;
nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;

@@ -1119,8 +1119,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rc);

ctxp->wqeq->hba_wqidx = 0;
nvmewqeq->context2 = NULL;
nvmewqeq->context3 = NULL;
nvmewqeq->context_un.axchg = NULL;
nvmewqeq->bpl_dmabuf = NULL;
rc = -EBUSY;
aerr:
return rc;
@@ -1590,7 +1590,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));

ctx_buf->iocbq->context1 = NULL;
ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
@@ -2025,7 +2025,7 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq->wqfull_list, list) {
if (ctxp) {
/* Checking for a specific IO to flush */
if (nvmewqeq->context2 == ctxp) {
if (nvmewqeq->context_un.axchg == ctxp) {
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
@@ -2071,7 +2071,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
ctxp = nvmewqeq->context_un.axchg;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
@@ -2617,10 +2617,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
ctxp->wqeq = nvmewqe;

/* prevent preparing wqe with NULL ndlp reference */
nvmewqe->context1 = lpfc_nlp_get(ndlp);
if (nvmewqe->context1 == NULL)
nvmewqe->ndlp = lpfc_nlp_get(ndlp);
if (!nvmewqe->ndlp)
goto nvme_wqe_free_wqeq_exit;
nvmewqe->context2 = ctxp;
nvmewqe->context_un.axchg = ctxp;

wqe = &nvmewqe->wqe;
memset(wqe, 0, sizeof(union lpfc_wqe));
@@ -2692,8 +2692,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
return nvmewqe;

nvme_wqe_free_wqeq_exit:
nvmewqe->context2 = NULL;
nvmewqe->context3 = NULL;
nvmewqe->context_un.axchg = NULL;
nvmewqe->ndlp = NULL;
nvmewqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, nvmewqe);
return NULL;
}
@@ -2995,7 +2996,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
nvmewqe->ndlp = ndlp;

for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
@@ -3053,7 +3054,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
bool released = false;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3084,8 +3085,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3123,7 +3124,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
bool released = false;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;

if (!ctxp) {
@@ -3169,8 +3170,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3203,7 +3204,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
uint32_t result;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;

if (phba->nvmet_support) {
@@ -3234,8 +3235,8 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}
@@ -3322,9 +3323,9 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
OTHER_COMMAND);

abts_wqeq->vport = phba->pport;
abts_wqeq->context1 = ndlp;
abts_wqeq->context2 = ctxp;
abts_wqeq->context3 = NULL;
abts_wqeq->ndlp = ndlp;
abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->bpl_dmabuf = NULL;
abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
@@ -3477,7 +3478,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
abts_wqeq->cmd_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3630,8 +3631,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
out:
if (tgtp)
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
abts_wqeq->rsp_dmabuf = NULL;
abts_wqeq->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);

@@ -433,7 +433,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
psb->cur_iocbq.context1 = psb;
psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);

@@ -3835,7 +3835,7 @@ lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
else
time = div_u64(time + 500, 1000); /* round it */

cgs = this_cpu_ptr(phba->cmf_stat);
cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->rcv_bytes);
atomic64_add(time, &cgs->rx_latency);
atomic_inc(&cgs->rx_io_cnt);
@@ -3879,7 +3879,7 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
atomic_set(&phba->rx_max_read_cnt, size);
}

cgs = this_cpu_ptr(phba->cmf_stat);
cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->total_bytes);
return 0;
}
@@ -4082,8 +4082,7 @@ static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct lpfc_rport_data *rdata;
@@ -4276,6 +4275,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_LINK_DOWN ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
@@ -4420,7 +4420,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *) pIocbIn->context1;
(struct lpfc_io_buf *) pIocbIn->io_buf;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
@@ -4743,7 +4743,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
piocbq->iocb.ulpFCP2Rcvy = 0;

piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
piocbq->io_buf = lpfc_cmd;
if (!piocbq->cmd_cmpl)
piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = tmo;
@@ -4855,8 +4855,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

pwqeq->vport = vport;
pwqeq->vport = vport;
pwqeq->context1 = lpfc_cmd;
pwqeq->io_buf = lpfc_cmd;
pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;

@@ -5097,8 +5096,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *) cmdiocbq->context1;
struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
@@ -5346,9 +5344,9 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
{
u64 *lta;

if (vport->vmid_priority_tagging)
if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
else
else if (vport->phba->cfg_vmid_app_header)
tag->app_id = vmp->un.app_id;

if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -5393,11 +5391,12 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
{
struct lpfc_vmid *vmp = NULL;
int hash, len, rc, i;
int hash, len, rc = -EPERM, i;

/* check if QFPA is complete */
if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
LPFC_VMID_QFPA_CMPL)) {
if (lpfc_vmid_is_type_priority_tag(vport) &&
!(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
(vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
return -EAGAIN;
}
@@ -5471,7 +5470,7 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
vport->vmid_inactivity_timeout ? 1 : 0;

/* if type priority tag, get next available VMID */
if (lpfc_vmid_is_type_priority_tag(vport))
if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
lpfc_vmid_assign_cs_ctl(vport, vmp);

/* allocate the per cpu variable for holding */
@@ -5490,9 +5489,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
write_unlock(&vport->vmid_lock);

/* complete transaction with switch */
if (lpfc_vmid_is_type_priority_tag(vport))
if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
rc = lpfc_vmid_uvem(vport, vmp, true);
else
else if (vport->phba->cfg_vmid_app_header)
rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
if (!rc) {
write_lock(&vport->vmid_lock);
@@ -5866,25 +5865,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (!lpfc_cmd)
return ret;

spin_lock_irqsave(&phba->hbalock, flags);
/* Guard against IO completion being called at same time */
spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);

spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
ret = FAILED;
goto out_unlock;
goto out_unlock_hba;
}

/* Guard against IO completion being called at same time */
spin_lock(&lpfc_cmd->buf_lock);

if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
goto out_unlock_buf;
goto out_unlock_hba;
}

iocb = &lpfc_cmd->cur_iocbq;
@@ -5892,7 +5891,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock_buf;
goto out_unlock_hba;
}
spin_lock(&pring_s4->ring_lock);
}
@@ -5917,7 +5916,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock_ring;
}

BUG_ON(iocb->context1 != lpfc_cmd);
WARN_ON(iocb->io_buf != lpfc_cmd);

/* abort issued in recovery is still in progress */
if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
@@ -5925,8 +5924,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
goto wait_for_cmpl;
}

@@ -5947,15 +5946,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
ret = FAILED;
goto out;
goto out_unlock_hba;
}

/* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);

if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
@@ -5990,10 +5987,9 @@ wait_for_cmpl:
out_unlock_ring:
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
out_unlock_buf:
spin_unlock(&lpfc_cmd->buf_lock);
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, flags);
out_unlock_hba:
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "

@@ -1255,18 +1255,18 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
cmnd = get_job_cmnd(phba, piocbq);

if (piocbq->cmd_flag & LPFC_IO_FCP) {
lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
lpfc_cmd = piocbq->io_buf;
ndlp = lpfc_cmd->rdata->pnode;
} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
!(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
ndlp = piocbq->context_un.ndlp;
ndlp = piocbq->ndlp;
} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
ndlp = piocbq->context_un.ndlp;
ndlp = piocbq->ndlp;
} else {
ndlp = piocbq->context1;
ndlp = piocbq->ndlp;
}

spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1373,7 +1373,7 @@ static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;

@@ -1996,9 +1996,9 @@ initpath:

sync_buf->vport = phba->pport;
sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
sync_buf->context1 = NULL;
sync_buf->context2 = NULL;
sync_buf->context3 = NULL;
sync_buf->cmd_dmabuf = NULL;
sync_buf->rsp_dmabuf = NULL;
sync_buf->bpl_dmabuf = NULL;
sync_buf->sli4_xritag = NO_XRI;

sync_buf->cmd_flag |= LPFC_IO_CMF;
@@ -2848,19 +2848,11 @@ void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint16_t rpi, vpi;
int rc;

mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}

/*
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
@@ -2893,8 +2885,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
}

if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
@@ -2945,7 +2935,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
@@ -3197,7 +3187,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
uint32_t oxid, sid, did, fctl, size;
int ret = 1;

d_buf = piocb->context2;
d_buf = piocb->cmd_dmabuf;

nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
fc_hdr = nvmebuf->hbuf.virt;
@@ -3478,9 +3468,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,

if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
saveq->context2 = lpfc_sli_get_buff(phba, pring,
saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
if (!saveq->context2)
if (!saveq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3490,9 +3480,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
saveq->context3 = lpfc_sli_get_buff(phba, pring,
saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
if (!saveq->context3)
if (!saveq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3504,10 +3494,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &saveq->list, list) {
irsp = &iocbq->iocb;
if (irsp->ulpBdeCount != 0) {
iocbq->context2 = lpfc_sli_get_buff(phba,
iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
pring,
irsp->un.ulpWord[3]);
if (!iocbq->context2)
if (!iocbq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3517,10 +3507,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
iocbq->context3 = lpfc_sli_get_buff(phba,
iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
pring,
irsp->unsli3.sli3Words[7]);
if (!iocbq->context3)
if (!iocbq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3534,12 +3524,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} else {
paddr = getPaddr(irsp->un.cont64[0].addrHigh,
irsp->un.cont64[0].addrLow);
saveq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
if (irsp->ulpBdeCount == 2) {
paddr = getPaddr(irsp->un.cont64[1].addrHigh,
irsp->un.cont64[1].addrLow);
saveq->context3 = lpfc_sli_ringpostbuf_get(phba,
saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
pring,
paddr);
}
@@ -3717,7 +3707,6 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
struct lpfc_iocbq *cmdiocbp;
int rc = 1;
unsigned long iflag;
u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;

@@ -3857,7 +3846,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}

return rc;
return 1;
}

/**
@@ -5275,6 +5264,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
phba->sli4_hba.fawwpn_flag = 0;
spin_unlock_irq(&phba->hbalock);

memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -5851,26 +5841,20 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
mboxq->mcqe.trailer);

if (rc) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}

lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
rc = 0;

out_free_mboxq:
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return rc;
}

@@ -7994,10 +7978,6 @@ lpfc_cmf_setup(phba)

sli4_params = &phba->sli4_hba.pc_sli4_params;

/* Are we forcing MI off via module parameter? */
if (!phba->cfg_enable_mi)
sli4_params->mi_ver = 0;

/* Always try to enable MI feature if we can */
if (sli4_params->mi_ver) {
lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
@@ -8543,8 +8523,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}

/*
* This memory was allocated by the lpfc_read_sparam routine. Release
* it to the mbuf pool.
* This memory was allocated by the lpfc_read_sparam routine but is
* no longer needed. It is released and ctx_buf NULLed to prevent
* unintended pointer access as the mbox is reused.
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -8864,6 +8845,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
mempool_free(mboxq, phba->mbox_mem_pool);

/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);

phba->hba_flag |= HBA_SETUP;
return rc;

@@ -10343,8 +10327,7 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
int rc;
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *)piocb->context1;
struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;

lpfc_prep_embed_io(phba, lpfc_cmd);
rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
@@ -10394,11 +10377,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)

/* add the VMID tags as per switch response */
if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
if (phba->pport->vmid_priority_tagging) {
if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
(piocb->vmid_tag.cs_ctl_vmid));
} else {
} else if (phba->cfg_vmid_app_header) {
bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
wqe->words[31] = piocb->vmid_tag.app_id;
@@ -10599,6 +10582,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
struct lpfc_hba *phba = vport->phba;
union lpfc_wqe128 *wqe;
struct ulp_bde64_le *bde;
u8 els_id;

wqe = &cmdiocbq->wqe;
memset(wqe, 0, sizeof(*wqe));
@@ -10611,7 +10595,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

if (expect_rsp) {
bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_CR);
bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);

/* Transfer length */
wqe->els_req.payload_len = cmd_size;
@@ -10619,6 +10603,30 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,

/* DID */
bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);

/* Word 11 - ELS_ID */
switch (elscmd) {
case ELS_CMD_PLOGI:
els_id = LPFC_ELS_ID_PLOGI;
break;
case ELS_CMD_FLOGI:
els_id = LPFC_ELS_ID_FLOGI;
break;
case ELS_CMD_LOGO:
els_id = LPFC_ELS_ID_LOGO;
break;
case ELS_CMD_FDISC:
if (!vport->fc_myDID) {
els_id = LPFC_ELS_ID_FDISC;
break;
}
fallthrough;
default:
els_id = LPFC_ELS_ID_DEFAULT;
break;
}

bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
} else {
/* DID */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
@@ -10627,7 +10635,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
wqe->xmit_els_rsp.response_payload_len = cmd_size;

bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
CMD_XMIT_ELS_RSP64_CX);
CMD_XMIT_ELS_RSP64_WQE);
}

bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
@@ -10643,7 +10651,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
if (expect_rsp) {
bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);

/* For ELS_REQUEST64_CR, use the VPI by default */
/* For ELS_REQUEST64_WQE, use the VPI by default */
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
phba->vpi_ids[vport->vpi]);
}
@@ -10800,24 +10808,15 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
{
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bpl;
struct ulp_bde64_le *bde;

wqe = &cmdiocbq->wqe;
memset(wqe, 0, sizeof(*wqe));

/* Words 0 - 2 */
bpl = (struct ulp_bde64 *)bmp->virt;
if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
} else {
bde = (struct ulp_bde64_le *)&wqe->xmit_sequence.bde;
bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
bde->type_size = cpu_to_le32(bpl->tus.f.bdeSize);
bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}

/* Word 5 */
bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
@@ -10990,7 +10989,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
* be setup based on what work queue we used.
*/
if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
lpfc_cmd = piocb->io_buf;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
@@ -12064,8 +12063,9 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox;
u32 ulp_command, ulp_status, ulp_word4, iotag;

ulp_command = get_job_cmnd(phba, cmdiocb);
@@ -12077,25 +12077,32 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
} else {
irsp = &rspiocb->iocb;
iotag = irsp->ulpIoTag;

/* It is possible a PLOGI_RJT for NPIV ports to get aborted.
* The MBX_REG_LOGIN64 mbox command is freed back to the
* mbox_mem_pool here.
*/
if (cmdiocb->context_un.mbox) {
mbox = cmdiocb->context_un.mbox;
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
cmdiocb->context_un.mbox = NULL;
}
}

/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd code x%x completion Data: "
"x%x x%x x%x\n",
ulp_command, ulp_status, ulp_word4, iotag);

"x%x x%x x%x x%px\n",
ulp_command, ulp_status, ulp_word4, iotag,
cmdiocb->ndlp);
/*
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
if (ulp_command == CMD_GEN_REQUEST64_CR) {
ndlp = cmdiocb->context_un.ndlp;
if (ulp_command == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
} else {
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
else
lpfc_els_free_iocb(phba, cmdiocb);
}

lpfc_nlp_put(ndlp);
}
|
||||
} else {
|
||||
iotag = cmdiocb->iocb.ulpIoTag;
|
||||
if (pring->ringno == LPFC_ELS_RING) {
|
||||
ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
|
||||
ndlp = cmdiocb->ndlp;
|
||||
ulp_context = ndlp->nlp_rpi;
|
||||
} else {
|
||||
ulp_context = cmdiocb->iocb.ulpContext;
|
||||
@ -12185,7 +12192,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
|
||||
if (phba->link_state < LPFC_LINK_UP ||
|
||||
(phba->sli_rev == LPFC_SLI_REV4 &&
|
||||
phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
|
||||
phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
|
||||
(phba->link_flag & LS_EXTERNAL_LOOPBACK))
|
||||
ia = true;
|
||||
else
|
||||
ia = false;
|
||||
@ -12634,7 +12642,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
||||
} else {
|
||||
iotag = iocbq->iocb.ulpIoTag;
|
||||
if (pring->ringno == LPFC_ELS_RING) {
|
||||
ndlp = (struct lpfc_nodelist *)(iocbq->context1);
|
||||
ndlp = iocbq->ndlp;
|
||||
ulp_context = ndlp->nlp_rpi;
|
||||
} else {
|
||||
ulp_context = iocbq->iocb.ulpContext;
|
||||
@ -12644,7 +12652,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
||||
ndlp = lpfc_cmd->rdata->pnode;
|
||||
|
||||
if (lpfc_is_link_up(phba) &&
|
||||
(ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
|
||||
(ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
|
||||
!(phba->link_flag & LS_EXTERNAL_LOOPBACK))
|
||||
ia = false;
|
||||
else
|
||||
ia = true;
|
||||
@ -12739,8 +12748,8 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
|
||||
|
||||
/* Copy the contents of the local rspiocb into the caller's buffer. */
|
||||
cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
|
||||
if (cmdiocbq->context2 && rspiocbq)
|
||||
memcpy((char *)cmdiocbq->context2 + offset,
|
||||
if (cmdiocbq->rsp_iocb && rspiocbq)
|
||||
memcpy((char *)cmdiocbq->rsp_iocb + offset,
|
||||
(char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
|
||||
|
||||
/* Set the exchange busy flag for task management commands */
|
||||
@ -12848,13 +12857,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
|
||||
} else
|
||||
pring = &phba->sli.sli3_ring[ring_number];
|
||||
/*
|
||||
* If the caller has provided a response iocbq buffer, then context2
|
||||
* If the caller has provided a response iocbq buffer, then rsp_iocb
|
||||
* is NULL or its an error.
|
||||
*/
|
||||
if (prspiocbq) {
|
||||
if (piocb->context2)
|
||||
if (piocb->rsp_iocb)
|
||||
return IOCB_ERROR;
|
||||
piocb->context2 = prspiocbq;
|
||||
piocb->rsp_iocb = prspiocbq;
|
||||
}
|
||||
|
||||
piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
|
||||
@ -12938,7 +12947,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
|
||||
}
|
||||
|
||||
if (prspiocbq)
|
||||
piocb->context2 = NULL;
|
||||
piocb->rsp_iocb = NULL;
|
||||
|
||||
piocb->context_un.wait_queue = NULL;
|
||||
piocb->cmd_cmpl = NULL;
|
||||
@ -15732,7 +15741,6 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
|
||||
|
||||
mbox->vport = phba->pport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->ctx_buf = NULL;
|
||||
mbox->ctx_ndlp = NULL;
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
|
||||
@ -18107,7 +18115,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
|
||||
case FC_RCTL_ELS_REP: /* extended link services reply */
|
||||
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
|
||||
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
|
||||
case FC_RCTL_BA_NOP: /* basic link service NOP */
|
||||
case FC_RCTL_BA_ABTS: /* basic link service abort */
|
||||
case FC_RCTL_BA_RMC: /* remove connection */
|
||||
case FC_RCTL_BA_ACC: /* basic accept */
|
||||
@ -18128,6 +18135,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
|
||||
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
|
||||
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
|
||||
return lpfc_fc_frame_check(phba, fc_hdr);
|
||||
case FC_RCTL_BA_NOP: /* basic link service NOP */
|
||||
default:
|
||||
goto drop;
|
||||
}
|
||||
@ -18512,11 +18520,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
|
||||
struct lpfc_iocbq *cmd_iocbq,
|
||||
struct lpfc_iocbq *rsp_iocbq)
|
||||
{
|
||||
struct lpfc_nodelist *ndlp;
|
||||
|
||||
if (cmd_iocbq) {
|
||||
ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
|
||||
lpfc_nlp_put(ndlp);
|
||||
lpfc_nlp_put(cmd_iocbq->ndlp);
|
||||
lpfc_sli_release_iocbq(phba, cmd_iocbq);
|
||||
}
|
||||
|
||||
@ -18600,8 +18605,8 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
|
||||
/* Extract the F_CTL field from FC_HDR */
|
||||
fctl = sli4_fctl_from_fc_hdr(fc_hdr);
|
||||
|
||||
ctiocb->context1 = lpfc_nlp_get(ndlp);
|
||||
if (!ctiocb->context1) {
|
||||
ctiocb->ndlp = lpfc_nlp_get(ndlp);
|
||||
if (!ctiocb->ndlp) {
|
||||
lpfc_sli_release_iocbq(phba, ctiocb);
|
||||
return;
|
||||
}
|
||||
@ -18677,13 +18682,11 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
|
||||
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
||||
bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
|
||||
|
||||
|
||||
/* Xmit CT abts response on exchange <xid> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
|
||||
ctiocb->abort_rctl, oxid, phba->link_state);
|
||||
|
||||
lpfc_sli_prep_wqe(phba, ctiocb);
|
||||
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
|
||||
if (rc == IOCB_ERROR) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
||||
@ -18692,7 +18695,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
|
||||
ctiocb->abort_rctl, oxid,
|
||||
phba->link_state);
|
||||
lpfc_nlp_put(ndlp);
|
||||
ctiocb->context1 = NULL;
|
||||
ctiocb->ndlp = NULL;
|
||||
lpfc_sli_release_iocbq(phba, ctiocb);
|
||||
}
|
||||
}
|
||||
@ -18844,8 +18847,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
tot_len = bf_get(lpfc_rcqe_length,
|
||||
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
|
||||
first_iocbq->context2 = &seq_dmabuf->dbuf;
|
||||
first_iocbq->context3 = NULL;
|
||||
first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
|
||||
first_iocbq->bpl_dmabuf = NULL;
|
||||
/* Keep track of the BDE count */
|
||||
first_iocbq->wcqe_cmpl.word3 = 1;
|
||||
|
||||
@ -18869,8 +18872,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
lpfc_in_buf_free(vport->phba, d_buf);
|
||||
continue;
|
||||
}
|
||||
if (!iocbq->context3) {
|
||||
iocbq->context3 = d_buf;
|
||||
if (!iocbq->bpl_dmabuf) {
|
||||
iocbq->bpl_dmabuf = d_buf;
|
||||
iocbq->wcqe_cmpl.word3++;
|
||||
/* We need to get the size out of the right CQE */
|
||||
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
||||
@ -18896,8 +18899,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
||||
len = bf_get(lpfc_rcqe_length,
|
||||
&hbq_buf->cq_event.cqe.rcqe_cmpl);
|
||||
iocbq->context2 = d_buf;
|
||||
iocbq->context3 = NULL;
|
||||
iocbq->cmd_dmabuf = d_buf;
|
||||
iocbq->bpl_dmabuf = NULL;
|
||||
iocbq->wcqe_cmpl.word3 = 1;
|
||||
|
||||
if (len > LPFC_DATA_BUF_SIZE)
|
||||
@ -18942,12 +18945,14 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
|
||||
if (!lpfc_complete_unsol_iocb(phba,
|
||||
phba->sli4_hba.els_wq->pring,
|
||||
iocbq, fc_hdr->fh_r_ctl,
|
||||
fc_hdr->fh_type))
|
||||
fc_hdr->fh_type)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"2540 Ring %d handler: unexpected Rctl "
|
||||
"x%x Type x%x received\n",
|
||||
LPFC_ELS_RING,
|
||||
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
|
||||
lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
|
||||
}
|
||||
|
||||
/* Free iocb created in lpfc_prep_seq */
|
||||
list_for_each_entry_safe(curr_iocb, next_iocb,
|
||||
@ -18962,7 +18967,7 @@ static void
|
||||
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
struct lpfc_iocbq *rspiocb)
|
||||
{
|
||||
struct lpfc_dmabuf *pcmd = cmdiocb->context2;
|
||||
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
|
||||
|
||||
if (pcmd && pcmd->virt)
|
||||
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
|
||||
@ -19013,7 +19018,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
|
||||
/* copyin the payload */
|
||||
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
|
||||
|
||||
iocbq->context2 = pcmd;
|
||||
iocbq->cmd_dmabuf = pcmd;
|
||||
iocbq->vport = vport;
|
||||
iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
|
||||
iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
|
||||
@@ -20332,11 +20337,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
}
lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
mempool_free(mboxq, phba->mbox_mem_pool);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return data_length;
}

@@ -20651,7 +20652,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
LIST_HEAD(mbox_cmd_list);
@@ -20677,8 +20677,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Put reference count for delayed processing */

/* This reference is local to this routine. The
* reference is removed at routine exit.
*/
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);

/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
}
@@ -20721,12 +20725,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mb->ctx_buf = NULL;
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
@@ -20736,7 +20734,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
lpfc_nlp_put(ndlp);
}
}
mempool_free(mb, phba->mbox_mem_pool);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
}

/* Release the ndlp with the cleaned-up active mailbox command */
@@ -20888,8 +20886,8 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
* have not been byteswapped yet so there is no
* need to swap them back.
*/
if (pwqeq->context3)
dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
if (pwqeq->bpl_dmabuf)
dmabuf = pwqeq->bpl_dmabuf;
else
return xritag;

@@ -21041,7 +21039,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
wq = qp->io_wq;
pring = wq->pring;

ctxp = pwqe->context2;
ctxp = pwqe->context_un.axchg;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag;
@@ -21107,7 +21105,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));

if (!lpfc_is_link_up(phba))
if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
abtswqe->abort_cmd.rsrvd5 = 0;
@@ -21883,7 +21881,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,

mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;

rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -21920,9 +21917,12 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
}

exit:
/* This is an embedded SLI4 mailbox with an external buffer allocated.
* Free the pcmd and then cleanup with the correct routine.
*/
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_sli4_mbox_cmd_free(phba, mbox);
return byte_cnt;
}

@@ -22114,7 +22114,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
return NULL;
}

tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
GFP_ATOMIC,
&tmp->fcp_cmd_rsp_dma_handle);

@@ -22236,8 +22236,6 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
u32 fip, abort_tag;
struct lpfc_nodelist *ndlp = NULL;
union lpfc_wqe128 *wqe = &job->wqe;
struct lpfc_dmabuf *context2;
u32 els_id = LPFC_ELS_ID_DEFAULT;
u8 command_type = ELS_COMMAND_NON_FIP;

fip = phba->hba_flag & HBA_FIP_SUPPORT;
@@ -22254,21 +22252,12 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)

switch (cmnd) {
case CMD_ELS_REQUEST64_WQE:
if (job->cmd_flag & LPFC_IO_LIBDFC)
ndlp = job->context_un.ndlp;
else
ndlp = (struct lpfc_nodelist *)job->context1;

/* CCP CCPE PV PRI in word10 were set in the memcpy */
if (command_type == ELS_COMMAND_FIP)
els_id = ((job->cmd_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
ndlp = job->ndlp;

if_type = bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf);
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
context2 = (struct lpfc_dmabuf *)job->context2;
pcmd = (u32 *)context2->virt;
pcmd = (u32 *)job->cmd_dmabuf->virt;
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
*pcmd == ELS_CMD_RDF ||
@@ -22301,7 +22290,6 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
@@ -22309,7 +22297,7 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
break;
case CMD_XMIT_ELS_RSP64_WQE:
ndlp = (struct lpfc_nodelist *)job->context1;
ndlp = job->ndlp;

/* word4 */
wqe->xmit_els_rsp.word4 = 0;

|
||||
LPFC_CTX_HOST
|
||||
} lpfc_ctx_cmd;
|
||||
|
||||
/* Enumeration to describe the thread lock context. */
|
||||
enum lpfc_mbox_ctx {
|
||||
MBOX_THD_UNLOCKED,
|
||||
MBOX_THD_LOCKED
|
||||
};
|
||||
|
||||
union lpfc_vmid_tag {
|
||||
uint32_t app_id;
|
||||
uint8_t cs_ctl_vmid;
|
||||
@ -77,11 +83,15 @@ struct lpfc_iocbq {
|
||||
|
||||
u32 unsol_rcv_len; /* Receive len in usol path */
|
||||
|
||||
uint8_t num_bdes;
|
||||
uint8_t abort_bls; /* ABTS by initiator or responder */
|
||||
/* Pack the u8's together and make them module-4. */
|
||||
u8 num_bdes; /* Number of BDEs */
|
||||
u8 abort_bls; /* ABTS by initiator or responder */
|
||||
u8 abort_rctl; /* ACC or RJT flag */
|
||||
uint8_t priority; /* OAS priority */
|
||||
uint8_t retry; /* retry counter for IOCB cmd - if needed */
|
||||
u8 priority; /* OAS priority */
|
||||
u8 retry; /* retry counter for IOCB cmd - if needed */
|
||||
u8 rsvd1; /* Pad for u32 */
|
||||
u8 rsvd2; /* Pad for u32 */
|
||||
u8 rsvd3; /* Pad for u32 */
|
||||
|
||||
u32 cmd_flag;
|
||||
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
|
||||
@ -116,18 +126,22 @@ struct lpfc_iocbq {
|
||||
|
||||
uint32_t drvrTimeout; /* driver timeout in seconds */
|
||||
struct lpfc_vport *vport;/* virtual port pointer */
|
||||
void *context1; /* caller context information */
|
||||
void *context2; /* caller context information */
|
||||
void *context3; /* caller context information */
|
||||
struct lpfc_dmabuf *cmd_dmabuf;
|
||||
struct lpfc_dmabuf *rsp_dmabuf;
|
||||
struct lpfc_dmabuf *bpl_dmabuf;
|
||||
uint32_t event_tag; /* LA Event tag */
|
||||
union {
|
||||
wait_queue_head_t *wait_queue;
|
||||
struct lpfc_iocbq *rsp_iocb;
|
||||
struct lpfcMboxq *mbox;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
struct lpfc_node_rrq *rrq;
|
||||
struct nvmefc_ls_req *nvme_lsreq;
|
||||
struct lpfc_async_xchg_ctx *axchg;
|
||||
struct bsg_job_data *dd_data;
|
||||
} context_un;
|
||||
|
||||
struct lpfc_io_buf *io_buf;
|
||||
struct lpfc_iocbq *rsp_iocb;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
union lpfc_vmid_tag vmid_tag;
|
||||
void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
|
||||
struct lpfc_iocbq *rsp);
|
||||
|
@ -981,6 +981,9 @@ struct lpfc_sli4_hba {
|
||||
#define lpfc_conf_trunk_port3_nd_MASK 0x1
|
||||
uint8_t flash_id;
|
||||
uint8_t asic_rev;
|
||||
uint16_t fawwpn_flag; /* FA-WWPN support state */
|
||||
#define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is configured */
|
||||
#define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */
|
||||
};
|
||||
|
||||
enum lpfc_sge_type {
|
||||
|
@ -20,7 +20,7 @@
|
||||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "14.2.0.1"
|
||||
#define LPFC_DRIVER_VERSION "14.2.0.3"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
|
||||
/* Used for SLI 2/3 */
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
|
||||
* Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
|
||||
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
||||
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
@ -135,12 +135,14 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
|
||||
}
|
||||
|
||||
/*
|
||||
* Grab buffer pointer and clear context1 so we can use
|
||||
* lpfc_sli_issue_box_wait
|
||||
* Wait for the read_sparams mailbox to complete. Driver needs
|
||||
* this per vport to start the FDISC. If the mailbox fails,
|
||||
* just cleanup and return an error unless the failure is a
|
||||
* mailbox timeout. For MBX_TIMEOUT, allow the default
|
||||
* mbox completion handler to take care of the cleanup. This
|
||||
* is safe as the mailbox command isn't one that triggers
|
||||
* another mailbox.
|
||||
*/
|
||||
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
|
||||
pmb->ctx_buf = NULL;
|
||||
|
||||
pmb->vport = vport;
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
|
||||
if (rc != MBX_SUCCESS) {
|
||||
@ -148,34 +150,29 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"1830 Signal aborted mbxCmd x%x\n",
|
||||
mb->mbxCommand);
|
||||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
lpfc_mbox_rsrc_cleanup(phba, pmb,
|
||||
MBOX_THD_UNLOCKED);
|
||||
return -EINTR;
|
||||
} else {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
||||
"1818 VPort failed init, mbxCmd x%x "
|
||||
"READ_SPARM mbxStatus x%x, rc = x%x\n",
|
||||
mb->mbxCommand, mb->mbxStatus, rc);
|
||||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
lpfc_mbox_rsrc_cleanup(phba, pmb,
|
||||
MBOX_THD_UNLOCKED);
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
|
||||
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
|
||||
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
|
||||
sizeof (struct lpfc_name));
|
||||
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
|
||||
sizeof (struct lpfc_name));
|
||||
|
||||
lpfc_mbuf_free(phba, mp->virt, mp->phys);
|
||||
kfree(mp);
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
|
||||
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
|
||||
return 0;
|
||||
}
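The cleanup convention introduced above is easy to get wrong at call sites, so here is a minimal sketch of the pattern (the caller and its return codes are hypothetical; lpfc_mbox_rsrc_cleanup() and the MBOX_THD_* context values are the ones added by this series):

/* Hypothetical caller: on MBX_TIMEOUT the default completion handler
 * still owns the mailbox resources, so the unified helper is expected
 * to skip the free in that case, replacing the old open-coded
 * "if (rc != MBX_TIMEOUT) mempool_free(...)" dance at every call site. */
static int issue_mbox_example(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    int rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);

    /* Same call on both paths; the helper inspects the state. */
    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
    return (rc == MBX_SUCCESS) ? 0 : -EIO;
}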
@@ -234,7 +234,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
        ++mac53c94_errors;
        writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
    }
    if (cmd == 0) {
    if (!cmd) {
        printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
        return;
    }

@@ -4607,7 +4607,7 @@ static int __init megaraid_init(void)
     * major number allocation.
     */
    major = register_chrdev(0, "megadev_legacy", &megadev_fops);
    if (!major) {
    if (major < 0) {
        printk(KERN_WARNING
            "megaraid: failed to register char device\n");
    }

@@ -4473,8 +4473,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
        return -ENOMEM;
    }

    memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);

    for (i = 0; i < max_cmd; i++) {
        instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
            GFP_KERNEL);

@@ -2047,8 +2047,6 @@ map_cmd_status(struct fusion_context *fusion,

    scmd->result = (DID_OK << 16) | ext_status;
    if (ext_status == SAM_STAT_CHECK_CONDITION) {
        memset(scmd->sense_buffer, 0,
            SCSI_SENSE_BUFFERSIZE);
        memcpy(scmd->sense_buffer, sense,
            SCSI_SENSE_BUFFERSIZE);
    }

@@ -3,5 +3,6 @@
config SCSI_MPI3MR
    tristate "Broadcom MPI3 Storage Controller Device Driver"
    depends on PCI && SCSI
    select BLK_DEV_BSGLIB
    help
        MPI3 based Storage & RAID Controllers Driver.

@@ -2,3 +2,4 @@
obj-m += mpi3mr.o
mpi3mr-y += mpi3mr_os.o \
    mpi3mr_fw.o \
    mpi3mr_app.o \

@@ -115,57 +115,4 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
struct mpi3_scsi_task_mgmt_request {
    __le16 host_tag;
    u8 ioc_use_only02;
    u8 function;
    __le16 ioc_use_only04;
    u8 ioc_use_only06;
    u8 msg_flags;
    __le16 change_count;
    __le16 dev_handle;
    __le16 task_host_tag;
    u8 task_type;
    u8 reserved0f;
    __le16 task_request_queue_id;
    __le16 reserved12;
    __le32 reserved14;
    u8 lun[8];
};

#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
struct mpi3_scsi_task_mgmt_reply {
    __le16 host_tag;
    u8 ioc_use_only02;
    u8 function;
    __le16 ioc_use_only04;
    u8 ioc_use_only06;
    u8 msg_flags;
    __le16 ioc_use_only08;
    __le16 ioc_status;
    __le32 ioc_log_info;
    __le32 termination_count;
    __le32 response_data;
    __le32 reserved18;
};

#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
#endif

@@ -38,16 +38,6 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_ROM_BIOS (0x02)
#define MPI3_WHOINIT_HOST_DRIVER (0x03)
#define MPI3_WHOINIT_MANUFACTURER (0x04)
struct mpi3_driver_info_layout {
    __le32 information_length;
    u8 driver_signature[12];
    u8 os_name[16];
    u8 os_version[12];
    u8 driver_name[20];
    u8 driver_version[32];
    u8 driver_release_date[20];
    __le32 driver_capabilities;
};

struct mpi3_ioc_facts_request {
    __le16 host_tag;

@@ -647,23 +637,6 @@ struct mpi3_event_data_diag_buffer_status_change {
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
#define MPI3_PEL_CLASS_DEBUG (0x00)
#define MPI3_PEL_CLASS_PROGRESS (0x01)
#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
#define MPI3_PEL_CLASS_WARNING (0x03)
#define MPI3_PEL_CLASS_CRITICAL (0x04)
#define MPI3_PEL_CLASS_FATAL (0x05)
#define MPI3_PEL_CLASS_FAULT (0x06)
#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)

@@ -5,24 +5,6 @@
 */
#ifndef MPI30_PCI_H
#define MPI30_PCI_H 1
#ifndef MPI3_NVME_ENCAP_CMD_MAX
#define MPI3_NVME_ENCAP_CMD_MAX (1)
#endif
struct mpi3_nvme_encapsulated_request {
    __le16 host_tag;
    u8 ioc_use_only02;
    u8 function;
    __le16 ioc_use_only04;
    u8 ioc_use_only06;
    u8 msg_flags;
    __le16 change_count;
    __le16 dev_handle;
    __le16 encapsulated_command_length;
    __le16 flags;
    __le32 data_length;
    __le32 reserved14[3];
    __le32 command[MPI3_NVME_ENCAP_CMD_MAX];
};

#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)

@@ -30,16 +12,5 @@ struct mpi3_nvme_encapsulated_request {
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
struct mpi3_nvme_encapsulated_error_reply {
    __le16 host_tag;
    u8 ioc_use_only02;
    u8 function;
    __le16 ioc_use_only04;
    u8 ioc_use_only06;
    u8 msg_flags;
    __le16 ioc_use_only08;
    __le16 ioc_status;
    __le32 ioc_log_info;
    __le32 nvme_completion_entry[4];
};

#endif

@@ -38,6 +38,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>

#include "mpi/mpi30_transport.h"
#include "mpi/mpi30_cnfg.h"

@@ -52,9 +53,10 @@
extern spinlock_t mrioc_list_lock;
extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;

#define MPI3MR_DRIVER_VERSION "8.0.0.68.0"
#define MPI3MR_DRIVER_RELDATE "10-February-2022"
#define MPI3MR_DRIVER_VERSION "8.0.0.69.0"
#define MPI3MR_DRIVER_RELDATE "16-March-2022"

#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"

@@ -89,7 +91,9 @@ extern int prot_mask;
/* Reserved Host Tag definitions */
#define MPI3MR_HOSTTAG_INVALID 0xFFFF
#define MPI3MR_HOSTTAG_INITCMDS 1
#define MPI3MR_HOSTTAG_IOCTLCMDS 2
#define MPI3MR_HOSTTAG_BSG_CMDS 2
#define MPI3MR_HOSTTAG_PEL_ABORT 3
#define MPI3MR_HOSTTAG_PEL_WAIT 4
#define MPI3MR_HOSTTAG_BLK_TMS 5

#define MPI3MR_NUM_DEVRMCMD 16

@@ -120,6 +124,9 @@ extern int prot_mask;

#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */

#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)

/* Internal admin command state definitions */
#define MPI3MR_CMD_NOTUSED 0x8000
#define MPI3MR_CMD_COMPLETE 0x0001

@@ -148,8 +155,10 @@ extern int prot_mask;

#define MPI3MR_DEFAULT_MDTS (128 * 1024)
#define MPI3MR_DEFAULT_PGSZEXP (12)

/* Command retry count definitions */
#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
#define MPI3MR_PEL_RETRY_COUNT 3

/* Default target device queue depth */
#define MPI3MR_DEFAULT_SDEV_QD 32

@@ -175,6 +184,57 @@ extern int prot_mask;
/* MSI Index from Reply Queue Index */
#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset)

/*
 * Maximum data transfer size definitions for management
 * application commands
 */
#define MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024)
#define MPI3MR_MAX_APP_XFER_SEGMENTS 512
/*
 * 2048 sectors are for data buffers and additional 512 sectors for
 * other buffers
 */
#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
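As a quick consistency check on the sector budget described in the comment (an editorial observation, not part of the source):

/* 2048 data sectors * 512 bytes/sector = 1 MiB, which matches
 * MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024) above; the extra
 * 512 sectors leave headroom for the non-data buffers. */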
/**
 * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
 * Encapsulated commands.
 *
 * @base_addr: Physical address
 * @length: SGE length
 * @rsvd: Reserved
 * @rsvd1: Reserved
 * @sgl_type: sgl type
 */
struct mpi3mr_nvme_pt_sge {
    u64 base_addr;
    u32 length;
    u16 rsvd;
    u8 rsvd1;
    u8 sgl_type;
};

/**
 * struct mpi3mr_buf_map - local structure to
 * track kernel and user buffers associated with a BSG
 * structure.
 *
 * @bsg_buf: BSG buffer virtual address
 * @bsg_buf_len: BSG buffer length
 * @kern_buf: Kernel buffer virtual address
 * @kern_buf_len: Kernel buffer length
 * @kern_buf_dma: Kernel buffer DMA address
 * @data_dir: Data direction.
 */
struct mpi3mr_buf_map {
    void *bsg_buf;
    u32 bsg_buf_len;
    void *kern_buf;
    u32 kern_buf_len;
    dma_addr_t kern_buf_dma;
    u8 data_dir;
};

/* IOC State definitions */
enum mpi3mr_iocstate {
    MRIOC_STATE_READY = 1,

@@ -189,10 +249,10 @@ enum mpi3mr_iocstate {
enum mpi3mr_reset_reason {
    MPI3MR_RESET_FROM_BRINGUP = 1,
    MPI3MR_RESET_FROM_FAULT_WATCH = 2,
    MPI3MR_RESET_FROM_IOCTL = 3,
    MPI3MR_RESET_FROM_APP = 3,
    MPI3MR_RESET_FROM_EH_HOS = 4,
    MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
    MPI3MR_RESET_FROM_IOCTL_TIMEOUT = 6,
    MPI3MR_RESET_FROM_APP_TIMEOUT = 6,
    MPI3MR_RESET_FROM_MUR_FAILURE = 7,
    MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
    MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,

@@ -543,6 +603,7 @@ struct mpi3mr_sdev_priv_data {
 * @ioc_status: IOC status from the firmware
 * @ioc_loginfo: IOC log info from the firmware
 * @is_waiting: Is the command issued in block mode
 * @is_sense: Is Sense data present
 * @retry_count: Retry count for retriable commands
 * @host_tag: Host tag used by the command
 * @callback: Callback for non blocking commands

@@ -558,6 +619,7 @@ struct mpi3mr_drv_cmd {
    u16 ioc_status;
    u32 ioc_loginfo;
    u8 is_waiting;
    u8 is_sense;
    u8 retry_count;
    u16 host_tag;

@@ -685,6 +747,7 @@ struct scmd_priv {
 * @chain_bitmap_sz: Chain buffer allocator bitmap size
 * @chain_bitmap: Chain buffer allocator bitmap
 * @chain_buf_lock: Chain buffer list lock
 * @bsg_cmds: Command tracker for BSG command
 * @host_tm_cmds: Command tracker for task management commands
 * @dev_rmhs_cmds: Command tracker for device removal commands
 * @evtack_cmds: Command tracker for event ack commands

@@ -704,16 +767,35 @@ struct scmd_priv {
 * @reset_waitq: Controller reset wait queue
 * @prepare_for_reset: Prepare for reset event received
 * @prepare_for_reset_timeout_counter: Prepare for reset timeout
 * @prp_list_virt: NVMe encapsulated PRP list virtual base
 * @prp_list_dma: NVMe encapsulated PRP list DMA
 * @prp_sz: NVMe encapsulated PRP list size
 * @diagsave_timeout: Diagnostic information save timeout
 * @logging_level: Controller debug logging level
 * @flush_io_count: I/O count to flush after reset
 * @current_event: Firmware event currently in process
 * @driver_info: Driver, Kernel, OS information to firmware
 * @change_count: Topology change count
 * @pel_enabled: Persistent Event Log (PEL) enabled or not
 * @pel_abort_requested: PEL abort is requested or not
 * @pel_class: PEL Class identifier
 * @pel_locale: PEL Locale identifier
 * @pel_cmds: Command tracker for PEL wait command
 * @pel_abort_cmd: Command tracker for PEL abort command
 * @pel_newest_seqnum: Newest PEL sequence number
 * @pel_seqnum_virt: PEL sequence number virtual address
 * @pel_seqnum_dma: PEL sequence number DMA address
 * @pel_seqnum_sz: PEL sequence number size
 * @op_reply_q_offset: Operational reply queue offset with MSIx
 * @default_qcount: Total Default queues
 * @active_poll_qcount: Currently active poll queue count
 * @requested_poll_qcount: User requested poll queue count
 * @bsg_dev: BSG device structure
 * @bsg_queue: Request queue for BSG device
 * @stop_bsgs: Stop BSG request flag
 * @logdata_buf: Circular buffer to store log data entries
 * @logdata_buf_idx: Index of entry in buffer to store
 * @logdata_entry_sz: log data entry size
 */
struct mpi3mr_ioc {
    struct list_head list;

@@ -820,6 +902,7 @@ struct mpi3mr_ioc {
    void *chain_bitmap;
    spinlock_t chain_buf_lock;

    struct mpi3mr_drv_cmd bsg_cmds;
    struct mpi3mr_drv_cmd host_tm_cmds;
    struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
    struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];

@@ -842,6 +925,10 @@ struct mpi3mr_ioc {
    u8 prepare_for_reset;
    u16 prepare_for_reset_timeout_counter;

    void *prp_list_virt;
    dma_addr_t prp_list_dma;
    u32 prp_sz;

    u16 diagsave_timeout;
    int logging_level;
    u16 flush_io_count;

@@ -849,11 +936,30 @@ struct mpi3mr_ioc {
    struct mpi3mr_fwevt *current_event;
    struct mpi3_driver_info_layout driver_info;
    u16 change_count;
    u16 op_reply_q_offset;

    u8 pel_enabled;
    u8 pel_abort_requested;
    u8 pel_class;
    u16 pel_locale;
    struct mpi3mr_drv_cmd pel_cmds;
    struct mpi3mr_drv_cmd pel_abort_cmd;

    u32 pel_newest_seqnum;
    void *pel_seqnum_virt;
    dma_addr_t pel_seqnum_dma;
    u32 pel_seqnum_sz;

    u16 op_reply_q_offset;
    u16 default_qcount;
    u16 active_poll_qcount;
    u16 requested_poll_qcount;

    struct device *bsg_dev;
    struct request_queue *bsg_queue;
    u8 stop_bsgs;
    u8 *logdata_buf;
    u16 logdata_buf_idx;
    u16 logdata_entry_sz;
};

/**

@@ -866,6 +972,7 @@ struct mpi3mr_ioc {
 * @send_ack: Event acknowledgment required or not
 * @process_evt: Bottomhalf processing required or not
 * @evt_ctx: Event context to send in Ack
 * @event_data_size: size of the event data in bytes
 * @pending_at_sml: waiting for device add/remove API to complete
 * @discard: discard this event
 * @ref_count: kref count

@@ -879,6 +986,7 @@ struct mpi3mr_fwevt {
    bool send_ack;
    bool process_evt;
    u32 evt_ctx;
    u16 event_data_size;
    bool pending_at_sml;
    bool discard;
    struct kref ref_count;

@@ -962,5 +1070,20 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
    struct op_reply_qinfo *op_reply_q);
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);

void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc);
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
    u16 handle, uint lun, u16 htag, ulong timeout,
    struct mpi3mr_drv_cmd *drv_cmd,
    u8 *resp_code, struct scsi_cmnd *scmd);
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
    struct mpi3mr_ioc *mrioc, u16 handle);
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd);
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd);
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
    u16 event_data_size);
extern const struct attribute_group *mpi3mr_host_groups[];
extern const struct attribute_group *mpi3mr_dev_groups[];
#endif /* MPI3MR_H_INCLUDED */
drivers/scsi/mpi3mr/mpi3mr_app.c (new file, 1864 lines) — file diff suppressed because it is too large.
@@ -23,8 +23,8 @@
#define MPI3_DEBUG_RESET 0x00000020
#define MPI3_DEBUG_SCSI_ERROR 0x00000040
#define MPI3_DEBUG_REPLY 0x00000080
#define MPI3_DEBUG_IOCTL_ERROR 0x00008000
#define MPI3_DEBUG_IOCTL_INFO 0x00010000
#define MPI3_DEBUG_BSG_ERROR 0x00008000
#define MPI3_DEBUG_BSG_INFO 0x00010000
#define MPI3_DEBUG_SCSI_INFO 0x00020000
#define MPI3_DEBUG 0x01000000
#define MPI3_DEBUG_SG 0x02000000

@@ -110,20 +110,45 @@
} while (0)

#define dprint_ioctl_info(ioc, fmt, ...) \
#define dprint_bsg_info(ioc, fmt, ...) \
    do { \
        if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \
        if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \
            pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
    } while (0)

#define dprint_ioctl_err(ioc, fmt, ...) \
#define dprint_bsg_err(ioc, fmt, ...) \
    do { \
        if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \
        if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \
            pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
    } while (0)
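A hedged usage sketch of the renamed macros (the "opcode" local is hypothetical, for illustration only):

/* Emitted only when the corresponding MPI3_DEBUG_BSG_* bit is set in
 * mrioc->logging_level (the module's logging_level parameter). */
dprint_bsg_info(mrioc, "bsg request received, opcode 0x%02x\n", opcode);
dprint_bsg_err(mrioc, "bsg request failed, opcode 0x%02x\n", opcode);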
#endif /* MPT3SAS_DEBUG_H_INCLUDED */

/**
 * dprint_dump - print contents of a memory buffer
 * @req: Pointer to a memory buffer
 * @sz: Memory buffer size
 * @name_string: Name string to identify the buffer type
 */
static inline void
dprint_dump(void *req, int sz, const char *name_string)
{
    int i;
    __le32 *mfp = (__le32 *)req;

    sz = sz/4;
    if (name_string)
        pr_info("%s:\n\t", name_string);
    else
        pr_info("request:\n\t");
    for (i = 0; i < sz; i++) {
        if (i && ((i % 8) == 0))
            pr_info("\n\t");
        pr_info("%08x ", le32_to_cpu(mfp[i]));
    }
    pr_info("\n");
}
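A minimal usage sketch of the helper above (the frame buffer and label are hypothetical):

/* Hex-dumps a 64-byte request frame as sixteen 32-bit words,
 * eight per line, under the label "sample request". */
__le32 frame[16] = { 0 };
dprint_dump(frame, sizeof(frame), "sample request");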
/**
 * dprint_dump_req - print message frame contents
 * @req: pointer to message frame

@@ -15,6 +15,8 @@ mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
    struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd);

static int poll_queues;
module_param(poll_queues, int, 0444);

@@ -297,8 +299,14 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
    switch (host_tag) {
    case MPI3MR_HOSTTAG_INITCMDS:
        return &mrioc->init_cmds;
    case MPI3MR_HOSTTAG_BSG_CMDS:
        return &mrioc->bsg_cmds;
    case MPI3MR_HOSTTAG_BLK_TMS:
        return &mrioc->host_tm_cmds;
    case MPI3MR_HOSTTAG_PEL_ABORT:
        return &mrioc->pel_abort_cmd;
    case MPI3MR_HOSTTAG_PEL_WAIT:
        return &mrioc->pel_cmds;
    case MPI3MR_HOSTTAG_INVALID:
        if (def_reply && def_reply->function ==
            MPI3_FUNCTION_EVENT_NOTIFICATION)

@@ -865,10 +873,10 @@ static const struct {
} mpi3mr_reset_reason_codes[] = {
    { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
    { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
    { MPI3MR_RESET_FROM_IOCTL, "application invocation" },
    { MPI3MR_RESET_FROM_APP, "application invocation" },
    { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
    { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
    { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
    { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
    { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
    { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
    { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },

@@ -2813,6 +2821,10 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
    if (!mrioc->init_cmds.reply)
        goto out_failed;

    mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
    if (!mrioc->bsg_cmds.reply)
        goto out_failed;

    for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
        mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
            GFP_KERNEL);

@@ -2831,6 +2843,14 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
    if (!mrioc->host_tm_cmds.reply)
        goto out_failed;

    mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
    if (!mrioc->pel_cmds.reply)
        goto out_failed;

    mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
    if (!mrioc->pel_abort_cmd.reply)
        goto out_failed;

    mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
    if (mrioc->facts.max_devhandle % 8)
        mrioc->dev_handle_bitmap_sz++;

@@ -3728,6 +3748,18 @@ retry_init:
        goto out_failed;
    }

    if (!mrioc->pel_seqnum_virt) {
        dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
        mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
        mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
            mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
            GFP_KERNEL);
        if (!mrioc->pel_seqnum_virt) {
            retval = -ENOMEM;
            goto out_failed_noretry;
        }
    }

    retval = mpi3mr_enable_events(mrioc);
    if (retval) {
        ioc_err(mrioc, "failed to enable events %d\n",

@@ -3837,6 +3869,18 @@ retry_init:
        goto out_failed;
    }

    if (!mrioc->pel_seqnum_virt) {
        dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
        mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
        mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
            mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
            GFP_KERNEL);
        if (!mrioc->pel_seqnum_virt) {
            retval = -ENOMEM;
            goto out_failed_noretry;
        }
    }

    if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
        ioc_err(mrioc,
            "cannot create minimum number of operational queues expected:%d created:%d\n",

@@ -3948,8 +3992,14 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)

    if (mrioc->init_cmds.reply) {
        memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
        memset(mrioc->bsg_cmds.reply, 0,
            sizeof(*mrioc->bsg_cmds.reply));
        memset(mrioc->host_tm_cmds.reply, 0,
            sizeof(*mrioc->host_tm_cmds.reply));
        memset(mrioc->pel_cmds.reply, 0,
            sizeof(*mrioc->pel_cmds.reply));
        memset(mrioc->pel_abort_cmd.reply, 0,
            sizeof(*mrioc->pel_abort_cmd.reply));
        for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
            memset(mrioc->dev_rmhs_cmds[i].reply, 0,
                sizeof(*mrioc->dev_rmhs_cmds[i].reply));

@@ -4050,9 +4100,18 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
    kfree(mrioc->init_cmds.reply);
    mrioc->init_cmds.reply = NULL;

    kfree(mrioc->bsg_cmds.reply);
    mrioc->bsg_cmds.reply = NULL;

    kfree(mrioc->host_tm_cmds.reply);
    mrioc->host_tm_cmds.reply = NULL;

    kfree(mrioc->pel_cmds.reply);
    mrioc->pel_cmds.reply = NULL;

    kfree(mrioc->pel_abort_cmd.reply);
    mrioc->pel_abort_cmd.reply = NULL;

    for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
        kfree(mrioc->evtack_cmds[i].reply);
        mrioc->evtack_cmds[i].reply = NULL;

@@ -4101,6 +4160,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
            mrioc->admin_req_base, mrioc->admin_req_dma);
        mrioc->admin_req_base = NULL;
    }

    if (mrioc->pel_seqnum_virt) {
        dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
            mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
        mrioc->pel_seqnum_virt = NULL;
    }

    kfree(mrioc->logdata_buf);
    mrioc->logdata_buf = NULL;
}

/**

@@ -4235,6 +4304,8 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)

    cmdptr = &mrioc->init_cmds;
    mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

    cmdptr = &mrioc->bsg_cmds;
    mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

    cmdptr = &mrioc->host_tm_cmds;
    mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

@@ -4247,6 +4318,254 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
        cmdptr = &mrioc->evtack_cmds[i];
        mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
    }

    cmdptr = &mrioc->pel_cmds;
    mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

    cmdptr = &mrioc->pel_abort_cmd;
    mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}

/**
 * mpi3mr_pel_wait_post - Issue PEL Wait
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL Wait MPI request through admin queue and return.
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd)
{
    struct mpi3_pel_req_action_wait pel_wait;

    mrioc->pel_abort_requested = false;

    memset(&pel_wait, 0, sizeof(pel_wait));
    drv_cmd->state = MPI3MR_CMD_PENDING;
    drv_cmd->is_waiting = 0;
    drv_cmd->callback = mpi3mr_pel_wait_complete;
    drv_cmd->ioc_status = 0;
    drv_cmd->ioc_loginfo = 0;
    pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
    pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
    pel_wait.action = MPI3_PEL_ACTION_WAIT;
    pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
    pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
    pel_wait.class = cpu_to_le16(mrioc->pel_class);
    pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
    dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
        mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);

    if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
        dprint_bsg_err(mrioc,
            "Issuing PELWait: Admin post failed\n");
        drv_cmd->state = MPI3MR_CMD_NOTUSED;
        drv_cmd->callback = NULL;
        drv_cmd->retry_count = 0;
        mrioc->pel_enabled = false;
    }
}
/**
 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL get sequence number MPI request through admin queue
 * and return.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd)
{
    struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
    u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
    int retval = 0;

    memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
    mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
    mrioc->pel_cmds.is_waiting = 0;
    mrioc->pel_cmds.ioc_status = 0;
    mrioc->pel_cmds.ioc_loginfo = 0;
    mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
    pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
    pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
    pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
    mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
        mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

    retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
        sizeof(pel_getseq_req), 0);
    if (retval) {
        if (drv_cmd) {
            drv_cmd->state = MPI3MR_CMD_NOTUSED;
            drv_cmd->callback = NULL;
            drv_cmd->retry_count = 0;
        }
        mrioc->pel_enabled = false;
    }

    return retval;
}

/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the callback handler for the PELWait request; the
 * firmware completes a PELWait request when it is aborted or when
 * a new PEL entry is available. This sends an AEN to the
 * application, and if the PELWait completion is not due to a
 * PELAbort, it then requests a new PEL sequence number.
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd)
{
    struct mpi3_pel_reply *pel_reply = NULL;
    u16 ioc_status, pe_log_status;
    bool do_retry = false;

    if (drv_cmd->state & MPI3MR_CMD_RESET)
        goto cleanup_drv_cmd;

    ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
    if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
        ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
            __func__, ioc_status, drv_cmd->ioc_loginfo);
        dprint_bsg_err(mrioc,
            "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
            ioc_status, drv_cmd->ioc_loginfo);
        do_retry = true;
    }

    if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
        pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

    if (!pel_reply) {
        dprint_bsg_err(mrioc,
            "pel_wait: failed due to no reply\n");
        goto out_failed;
    }

    pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
    if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
        (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
        ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
            __func__, pe_log_status);
        dprint_bsg_err(mrioc,
            "pel_wait: failed due to pel_log_status(0x%04x)\n",
            pe_log_status);
        do_retry = true;
    }

    if (do_retry) {
        if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
            drv_cmd->retry_count++;
            dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
                drv_cmd->retry_count);
            mpi3mr_pel_wait_post(mrioc, drv_cmd);
            return;
        }
        dprint_bsg_err(mrioc,
            "pel_wait: failed after all retries(%d)\n",
            drv_cmd->retry_count);
        goto out_failed;
    }
    atomic64_inc(&event_counter);
    if (!mrioc->pel_abort_requested) {
        mrioc->pel_cmds.retry_count = 0;
        mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
    }

    return;
out_failed:
    mrioc->pel_enabled = false;
cleanup_drv_cmd:
    drv_cmd->state = MPI3MR_CMD_NOTUSED;
    drv_cmd->callback = NULL;
    drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the callback handler for the PEL get sequence number
 * request; a new PEL wait request is issued to the firmware
 * from this handler.
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_drv_cmd *drv_cmd)
{
    struct mpi3_pel_reply *pel_reply = NULL;
    struct mpi3_pel_seq *pel_seqnum_virt;
    u16 ioc_status;
    bool do_retry = false;

    pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

    if (drv_cmd->state & MPI3MR_CMD_RESET)
        goto cleanup_drv_cmd;

    ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
    if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
        dprint_bsg_err(mrioc,
            "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
            ioc_status, drv_cmd->ioc_loginfo);
        do_retry = true;
    }

    if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
        pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
    if (!pel_reply) {
        dprint_bsg_err(mrioc,
            "pel_get_seqnum: failed due to no reply\n");
        goto out_failed;
    }

    if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
        dprint_bsg_err(mrioc,
            "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
            le16_to_cpu(pel_reply->pe_log_status));
        do_retry = true;
    }

    if (do_retry) {
        if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
            drv_cmd->retry_count++;
            dprint_bsg_err(mrioc,
                "pel_get_seqnum: retrying(%d)\n",
                drv_cmd->retry_count);
            mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
            return;
        }

        dprint_bsg_err(mrioc,
            "pel_get_seqnum: failed after all retries(%d)\n",
            drv_cmd->retry_count);
        goto out_failed;
    }
    mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
    drv_cmd->retry_count = 0;
    mpi3mr_pel_wait_post(mrioc, drv_cmd);

    return;
out_failed:
    mrioc->pel_enabled = false;
cleanup_drv_cmd:
    drv_cmd->state = MPI3MR_CMD_NOTUSED;
    drv_cmd->callback = NULL;
    drv_cmd->retry_count = 0;
}
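An editorial sketch of the polling loop formed by the four PEL functions above (assuming the firmware completes an outstanding WAIT whenever a new log entry arrives or the wait is aborted):

/*
 *   mpi3mr_pel_wait_post() --completes--> mpi3mr_pel_wait_complete()
 *            ^                                    | (not aborted)
 *            |                                    v
 *   mpi3mr_pel_get_seqnum_complete() <-- mpi3mr_pel_get_seqnum_post()
 *
 * Each wait completion bumps event_counter, so an application can
 * watch the driver's event_counter attribute for new PEL events.
 */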
/**

@@ -4258,7 +4577,7 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
 * This is a handler for recovering the controller by issuing a soft
 * reset or a diag fault reset. This is a blocking function; while
 * one reset is executing, any other resets will be
 * blocked. All IOCTLs/IO will be blocked during the reset. If
 * blocked. All BSG requests will be blocked during the reset. If
 * the controller reset is successful, the controller will be
 * reinitialized; otherwise the controller will be marked as not
 * recoverable

@@ -4305,6 +4624,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
        mpi3mr_reset_rc_name(reset_reason));

    mrioc->reset_in_progress = 1;
    mrioc->stop_bsgs = 1;
    mrioc->prev_reset_result = -1;

    if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&

@@ -4369,6 +4689,12 @@ out:
    if (!retval) {
        mrioc->diagsave_timeout = 0;
        mrioc->reset_in_progress = 0;
        mrioc->pel_abort_requested = 0;
        if (mrioc->pel_enabled) {
            mrioc->pel_cmds.retry_count = 0;
            mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
        }

        mpi3mr_rfresh_tgtdevs(mrioc);
        mrioc->ts_update_counter = 0;
        spin_lock_irqsave(&mrioc->watchdog_lock, flags);

@@ -4377,6 +4703,9 @@ out:
            &mrioc->watchdog_work,
            msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
        spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
        mrioc->stop_bsgs = 0;
        if (mrioc->pel_enabled)
            atomic64_inc(&event_counter);
    } else {
        mpi3mr_issue_reset(mrioc,
            MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);

@@ -14,6 +14,7 @@ LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);

@@ -634,7 +635,7 @@ found_tgtdev:
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
    struct mpi3mr_ioc *mrioc, u16 handle)
{
    struct mpi3mr_tgt_dev *tgtdev;

@@ -910,8 +911,10 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)

    list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
        list) {
        if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
            tgtdev->host_exposed) {
        if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
            dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
                tgtdev->perst_id);
            if (tgtdev->host_exposed)
                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
            mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
            mpi3mr_tgtdev_put(tgtdev);

@@ -1415,6 +1418,23 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
    }
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls the application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
    struct mpi3mr_fwevt *fwevt)
{
    mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
        fwevt->event_data_size);
}

/**
 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
 * @mrioc: Adapter instance reference

@@ -1467,6 +1487,11 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
        mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
        break;
    }
    case MPI3_EVENT_LOG_DATA:
    {
        mpi3mr_logdata_evt_bh(mrioc, fwevt);
        break;
    }
    default:
        break;
    }

@@ -2298,6 +2323,7 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
        break;
    }
    case MPI3_EVENT_DEVICE_INFO_CHANGED:
    case MPI3_EVENT_LOG_DATA:
    {
        process_evt_bh = 1;
        break;

@@ -2996,7 +3022,7 @@ inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
 *
 * Return: 0 on success, non-zero on errors
 */
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
    u16 handle, uint lun, u16 htag, ulong timeout,
    struct mpi3mr_drv_cmd *drv_cmd,
    u8 *resp_code, struct scsi_cmnd *scmd)

@@ -3589,6 +3615,7 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,

    mpi3mr_start_watchdog(mrioc);
    mrioc->is_driver_loading = 0;
    mrioc->stop_bsgs = 0;
    return 1;
}

@@ -3700,6 +3727,10 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
        return -ENXIO;

    mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);

    sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
    blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);

    switch (tgt_dev->dev_type) {
    case MPI3_DEVICE_DEVFORM_PCIE:
        /* The block layer hw sector size = 512 */

@@ -3971,6 +4002,12 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
    int iprio_class;
    u8 is_pcie_dev = 0;

    if (mrioc->unrecoverable) {
        scmd->result = DID_ERROR << 16;
        scsi_done(scmd);
        goto out;
    }

    sdev_priv_data = scmd->device->hostdata;
    if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
        scmd->result = DID_NO_CONNECT << 16;

@@ -4109,6 +4146,8 @@ static struct scsi_host_template mpi3mr_driver_template = {
    .max_segment_size = 0xffffffff,
    .track_queue_depth = 1,
    .cmd_size = sizeof(struct scmd_priv),
    .shost_groups = mpi3mr_host_groups,
    .sdev_groups = mpi3mr_dev_groups,
};

/**

@@ -4259,6 +4298,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    mutex_init(&mrioc->reset_mutex);
    mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
    mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
    mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);

    for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
        mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],

@@ -4271,6 +4311,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    mrioc->logging_level = logging_level;
    mrioc->shost = shost;
    mrioc->pdev = pdev;
    mrioc->stop_bsgs = 1;

    /* init shost parameters */
    shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;

@@ -4345,6 +4386,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    }

    scsi_scan_host(shost);
    mpi3mr_bsg_init(mrioc);
    return retval;

addhost_failed:

@@ -4389,6 +4431,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
    while (mrioc->reset_in_progress || mrioc->is_driver_loading)
        ssleep(1);

    mpi3mr_bsg_exit(mrioc);
    mrioc->stop_drv_processing = 1;
    mpi3mr_cleanup_fwevt_list(mrioc);
    spin_lock_irqsave(&mrioc->fwevt_lock, flags);

@@ -4563,6 +4606,12 @@ static struct pci_driver mpi3mr_pci_driver = {
#endif
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
    return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);
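Once registered below via driver_create_file(), the counter should be readable from userspace; the exact path is inferred from the PCI driver name, not stated in the source:

/* Hypothetical usage from a shell:
 *   cat /sys/bus/pci/drivers/mpi3mr/event_counter
 * prints the 64-bit count of events observed since module load. */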
static int __init mpi3mr_init(void)
{
    int ret_val;

@@ -4571,6 +4620,16 @@ static int __init mpi3mr_init(void)
        MPI3MR_DRIVER_VERSION);

    ret_val = pci_register_driver(&mpi3mr_pci_driver);
    if (ret_val) {
        pr_err("%s failed to load due to pci register driver failure\n",
            MPI3MR_DRIVER_NAME);
        return ret_val;
    }

    ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
        &driver_attr_event_counter);
    if (ret_val)
        pci_unregister_driver(&mpi3mr_pci_driver);

    return ret_val;
}

@@ -4585,6 +4644,8 @@ static void __exit mpi3mr_exit(void)
    pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
        MPI3MR_DRIVER_VERSION);

    driver_remove_file(&mpi3mr_pci_driver.driver,
        &driver_attr_event_counter);
    pci_unregister_driver(&mpi3mr_pci_driver);
}

@@ -3692,7 +3692,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
    }

    for (i = 0; i < ioc->combined_reply_index_count; i++) {
        ioc->replyPostRegisterIndex[i] = (resource_size_t *)
        ioc->replyPostRegisterIndex[i] =
            (resource_size_t __iomem *)
            ((u8 __force *)&ioc->chip->Doorbell +
            MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
            (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));

@@ -4312,7 +4313,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
    descriptor.SMID = cpu_to_le16(smid);

    writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
    writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
}

/**

@@ -4334,7 +4335,7 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
    descriptor.SMID = cpu_to_le16(smid);

    writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
    writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
}

/**

@@ -4357,7 +4358,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    descriptor.MSIxIndex = msix_task;
    descriptor.SMID = cpu_to_le16(smid);

    writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
    writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
}

/**

@@ -4378,7 +4379,7 @@ _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
    descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
    descriptor.SMID = cpu_to_le16(smid);

    writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
    writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
}

/**

@@ -4752,7 +4753,7 @@ static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
    int i = 0;
    char desc[16];
    char desc[17] = {0};
    u32 iounit_pg1_flags;
    u32 bios_version;
@@ -6893,7 +6894,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,

    /* send message 32-bits at a time */
    for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
        writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
        writel(request[i], &ioc->chip->Doorbell);
        if ((_base_wait_for_doorbell_ack(ioc, 5)))
            failed = 1;
    }

@@ -6912,16 +6913,16 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
    }

    /* read the first two 16-bits, it gives the total length of the reply */
    reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK);
    reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK;
    writel(0, &ioc->chip->HostInterruptStatus);
    if ((_base_wait_for_doorbell_int(ioc, 5))) {
        ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
            __LINE__);
        return -EFAULT;
    }
    reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK);
    reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
        & MPI2_DOORBELL_DATA_MASK;
    writel(0, &ioc->chip->HostInterruptStatus);

    for (i = 2; i < default_reply->MsgLength * 2; i++) {

@@ -6933,9 +6934,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        if (i >= reply_bytes/2) /* overflow case */
            ioc->base_readl(&ioc->chip->Doorbell);
        else
            reply[i] = le16_to_cpu(
                ioc->base_readl(&ioc->chip->Doorbell)
                & MPI2_DOORBELL_DATA_MASK);
            reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
                & MPI2_DOORBELL_DATA_MASK;
        writel(0, &ioc->chip->HostInterruptStatus);
    }
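A short note on the conversions removed in the hunks above: writel() already performs the CPU-to-little-endian conversion for MMIO, and readl()-style accessors (which base_readl() wraps) return CPU-endian data, so the explicit wrappers double-swapped on big-endian hosts. A minimal before/after sketch:

writel(request[0], &ioc->chip->Doorbell);      /* correct: one swap, done by writel() */
/* writel(cpu_to_le32(request[0]), ...);          wrong: double swap on big-endian */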
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
#define MPT3SAS_DRIVER_VERSION "40.100.00.00"
#define MPT3SAS_MAJOR_VERSION 40
#define MPT3SAS_DRIVER_VERSION "42.100.00.00"
#define MPT3SAS_MAJOR_VERSION 42
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00

@@ -1588,7 +1588,7 @@ struct MPT3SAS_ADAPTER {
    u8 combined_reply_index_count;
    u8 smp_affinity_enable;
    /* reply post register index */
    resource_size_t **replyPostRegisterIndex;
    resource_size_t __iomem **replyPostRegisterIndex;

    struct list_head delayed_tr_list;
    struct list_head delayed_tr_volume_list;

@@ -578,7 +578,7 @@ static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
    Mpi2SCSITaskManagementRequest_t *tm_request)
{
    u8 found = 0;
    bool found = false;
    u16 smid;
    u16 handle;
    struct scsi_cmnd *scmd;

@@ -600,6 +600,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
    handle = le16_to_cpu(tm_request->DevHandle);
    for (smid = ioc->scsiio_depth; smid && !found; smid--) {
        struct scsiio_tracker *st;
        __le16 task_mid;

        scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (!scmd)

@@ -618,10 +619,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
         * first outstanding smid will be picked up. Otherwise,
         * targeted smid will be the one.
         */
        if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
            tm_request->TaskMID = cpu_to_le16(st->smid);
            found = 1;
        }
        task_mid = cpu_to_le16(st->smid);
        if (!tm_request->TaskMID)
            tm_request->TaskMID = task_mid;
        found = tm_request->TaskMID == task_mid;
    }

    if (!found) {

@@ -10926,20 +10926,20 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
    case MPI2_EVENT_LOG_ENTRY_ADDED:
    {
        Mpi2EventDataLogEntryAdded_t *log_entry;
        u32 *log_code;
        u32 log_code;

        if (!ioc->is_warpdrive)
            break;

        log_entry = (Mpi2EventDataLogEntryAdded_t *)
            mpi_reply->EventData;
        log_code = (u32 *)log_entry->LogData;
        log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

        if (le16_to_cpu(log_entry->LogEntryQualifier)
            != MPT2_WARPDRIVE_LOGENTRY)
            break;

        switch (le32_to_cpu(*log_code)) {
        switch (log_code) {
        case MPT2_WARPDRIVE_LC_SSDT:
            ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
            break;

@@ -12588,20 +12588,18 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
 */
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
    unsigned char *buf;
    struct scsi_vpd *vpd;
    bool ncq_prio_supp = false;

    if (!scsi_device_supports_vpd(sdev))
        return ncq_prio_supp;
    rcu_read_lock();
    vpd = rcu_dereference(sdev->vpd_pg89);
    if (!vpd || vpd->len < 214)
        goto out;

    buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
    if (!buf)
        return ncq_prio_supp;
    ncq_prio_supp = (vpd->data[213] >> 4) & 1;
out:
    rcu_read_unlock();

    if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
        ncq_prio_supp = (buf[213] >> 4) & 1;

    kfree(buf);
    return ncq_prio_supp;
}
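A brief note on the design choice in the rewrite above (an editorial reading of the diff, not a statement from its authors):

/* The new code reads the copy of VPD page 0x89 that the SCSI core
 * caches at scan time, under rcu_read_lock(), instead of allocating
 * a buffer and issuing a fresh INQUIRY. The check therefore no
 * longer sleeps in kmalloc() or waits on device I/O; byte 213 bit 4
 * is the same NCQ-priority flag the old code tested. */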
|
||||
/*
|
||||
|
@ -4590,7 +4590,7 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
|
||||
mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
|
||||
pint_regs->global_interrupt_mask_reg =
|
||||
mapped_pci_addr + chip_cfg->global_intr_mask;
|
||||
};
|
||||
}
|
||||
|
||||
pinstance->ioa_reset_attempts = 0;
|
||||
init_waitqueue_head(&pinstance->reset_wait_q);
|
||||
|

@ -131,7 +131,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
struct qedf_ctx *qedf = NULL;
long reading;
int ret = 0;
char msg[40];

if (off != 0)
return ret;

@ -148,7 +147,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return ret;
}

memset(msg, 0, sizeof(msg));
switch (reading) {
case 0:
memset(qedf->grcdump, 0, qedf->grcdump_size);

@ -804,7 +804,6 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
struct qedf_io_log *io_log;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
unsigned long flags;
uint8_t op;

spin_lock_irqsave(&qedf->io_trace_lock, flags);

@ -813,7 +812,7 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
io_log->task_id = io_req->xid;
io_log->port_id = fcport->rdata->ids.port_id;
io_log->lun = sc_cmd->device->lun;
io_log->op = op = sc_cmd->cmnd[0];
io_log->op = sc_cmd->cmnd[0];
io_log->lba[0] = sc_cmd->cmnd[2];
io_log->lba[1] = sc_cmd->cmnd[3];
io_log->lba[2] = sc_cmd->cmnd[4];

@ -873,7 +873,7 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)

bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
struct qedf_rport *fcport = NULL;
struct qedf_rport *fcport;
int wait_cnt = 120;

while (wait_cnt--) {

@ -888,7 +888,7 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)

rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
if (test_bit(QEDF_RPORT_SESSION_READY,
&fcport->flags)) {
if (fcport->rdata)
QEDF_ERR(&qedf->dbg_ctx,

@ -899,9 +899,9 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
"Waiting for fcport %p.\n", fcport);
}
}

rcu_read_unlock();
return false;

}

/* Performs soft reset of qedf_ctx by simulating a link down/up */

@ -1067,7 +1067,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
u32 crc;
unsigned int hlen, tlen, elen;
int wlen;
struct fc_stats *stats;
struct fc_lport *tmp_lport;
struct fc_lport *vn_port = NULL;
struct qedf_rport *fcport;

@ -1215,10 +1214,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
hp->fcoe_sof = sof;

/*update tx stats */
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
this_cpu_inc(lport->stats->TxFrames);
this_cpu_add(lport->stats->TxWords, wlen);

/* Get VLAN ID from skb for printing purposes */
__vlan_hwaccel_get_tag(skb, &vlan_tci);
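Editor's note: the stats hunk above drops the get_cpu()/per_cpu_ptr()/put_cpu() pattern in favour of this_cpu_inc()/this_cpu_add(), which update the local CPU's counter without an explicit preemption-disable window. A minimal kernel-style sketch of the pattern (hypothetical counters, assumes a module context rather than a standalone program):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical per-CPU counters, mirroring lport->stats above. */
    static DEFINE_PER_CPU(unsigned long, tx_frames);
    static DEFINE_PER_CPU(unsigned long, tx_words);

    static void count_tx(unsigned int wlen)
    {
            /*
             * this_cpu_inc()/this_cpu_add() update this CPU's copy in a
             * preempt-safe way; no get_cpu()/put_cpu() pair is needed.
             */
            this_cpu_inc(tx_frames);
            this_cpu_add(tx_words, wlen);
    }

    static unsigned long total_tx_frames(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)  /* readers sum all per-CPU copies */
                    sum += per_cpu(tx_frames, cpu);
            return sum;
    }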

@ -657,7 +657,6 @@ qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
static int
qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
int32_t rval = 0;
struct auth_complete_cmd appplogiok;
struct app_plogi_reply appplogireply = {0};
struct fc_bsg_reply *bsg_reply = bsg_job->reply;

@ -758,7 +757,7 @@ errstate_exit:
&appplogireply,
sizeof(struct app_plogi_reply));

return rval;
return 0;
}

/**

@ -3933,7 +3933,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)

/* Flush the work queue and remove it */
if (ha->wq) {
flush_workqueue(ha->wq);
destroy_workqueue(ha->wq);
ha->wq = NULL;
}

@ -3866,8 +3866,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)

BUG_ON(cmd->sg_mapped);
cmd->jiffies_at_free = get_jiffies_64();
if (unlikely(cmd->free_sg))
kfree(cmd->sg);

if (!sess || !sess->se_sess) {
WARN_ON(1);

@ -883,7 +883,6 @@ struct qla_tgt_cmd {
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
unsigned int free_sg:1;
unsigned int write_data_transferred:1;
unsigned int q_full:1;
unsigned int term_exchg:1;

@ -671,7 +671,6 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
goto exit_chap_list;
}

memset(ha->chap_list, 0, chap_size);
memcpy(ha->chap_list, chap_flash_data, chap_size);

exit_chap_list:

@ -200,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)

/*
* 1024 is big enough for saturating fast SCSI LUNs.
* 4096 is big enough for saturating fast SCSI LUNs.
*/
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
return min_t(int, sdev->host->can_queue, 1024);
return min_t(int, sdev->host->can_queue, 4096);
}

/**

@ -321,6 +321,31 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return get_unaligned_be16(&buffer[2]) + 4;
}

static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
int result;

/*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
if (result < 0)
return 0;

if (result < SCSI_VPD_HEADER_SIZE) {
dev_warn_once(&sdev->sdev_gendev,
"%s: short VPD page 0x%02x length: %d bytes\n",
__func__, page, result);
return 0;
}

return result;
}
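Editor's note: scsi_get_vpd_size() above derives the full page size from a small header read; the page length is a big-endian 16-bit value at bytes 2-3, and the `+ 4` accounts for the 4-byte header (see scsi_vpd_inquiry()'s return above). A runnable userspace sketch of that arithmetic, with a hypothetical header:

    #include <stdio.h>

    /* Big-endian 16-bit load, like the kernel's get_unaligned_be16(). */
    static unsigned int be16(const unsigned char *p)
    {
            return (p[0] << 8) | p[1];
    }

    int main(void)
    {
            /* Hypothetical 4-byte VPD header: page 0xb0, length 0x003c. */
            unsigned char hdr[4] = { 0x00, 0xb0, 0x00, 0x3c };

            /* Whole page = 4-byte header + PAGE LENGTH field = 64 here. */
            printf("VPD page size = %u bytes\n", be16(&hdr[2]) + 4);
            return 0;
    }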
/**
* scsi_get_vpd_page - Get Vital Product Data from a SCSI device
* @sdev: The device to ask

@ -330,47 +355,38 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
*
* SCSI devices may optionally supply Vital Product Data. Each 'page'
* of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
* If the device supports this VPD page, this routine returns a pointer
* to a buffer containing the data from that page. The caller is
* responsible for calling kfree() on this pointer when it is no longer
* needed. If we cannot retrieve the VPD page this routine returns %NULL.
* If the device supports this VPD page, this routine fills @buf
* with the data from that page and return 0. If the VPD page is not
* supported or its content cannot be retrieved, -EINVAL is returned.
*/
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
int buf_len)
{
int i, result;
int result, vpd_len;

if (sdev->skip_vpd_pages)
goto fail;

/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result < 4)
goto fail;

/* If the user actually wanted this page, we can skip the rest */
if (page == 0)
return 0;

for (i = 4; i < min(result, buf_len); i++)
if (buf[i] == page)
goto found;

if (i < result && i >= buf_len)
/* ran off the end of the buffer, give us benefit of doubt */
goto found;
/* The device claims it doesn't support the requested page */
goto fail;

found:
result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
if (result < 0)
goto fail;

return 0;

fail:
if (!scsi_device_supports_vpd(sdev))
return -EINVAL;

vpd_len = scsi_get_vpd_size(sdev, page);
if (vpd_len <= 0)
return -EINVAL;

vpd_len = min(vpd_len, buf_len);

/*
* Fetch the actual page. Since the appropriate size was reported
* by the device it is now safe to ask for something bigger.
*/
memset(buf, 0, buf_len);
result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
if (result < 0)
return -EINVAL;
else if (result > vpd_len)
dev_warn_once(&sdev->sdev_gendev,
"%s: VPD page 0x%02x result %d > %d bytes\n",
__func__, page, result, vpd_len);

return 0;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);

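Editor's note: with the rewrite above, callers no longer need a buffer sized for the supported-pages scan; they pass a buffer of any reasonable size and get 0 or -EINVAL back. A hedged kernel-style caller sketch (hypothetical helper, assumes a valid sdev):

    #include <scsi/scsi_device.h>

    /* Hypothetical caller: fetch Block Limits (0xb0) into a local buffer. */
    static int read_block_limits(struct scsi_device *sdev, unsigned char *buf,
                                 int buf_len)
    {
            /*
             * Returns 0 on success; -EINVAL if the page is unsupported or
             * cannot be retrieved, per the updated kernel-doc above. The
             * transfer is internally clamped to min(vpd_len, buf_len).
             */
            return scsi_get_vpd_page(sdev, 0xb0, buf, buf_len);
    }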

@ -384,9 +400,17 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
struct scsi_vpd *vpd_buf;
int vpd_len = SCSI_VPD_PG_LEN, result;
int vpd_len, result;

vpd_len = scsi_get_vpd_size(sdev, page);
if (vpd_len <= 0)
return NULL;

retry_pg:
/*
* Fetch the actual page. Since the appropriate size was reported
* by the device it is now safe to ask for something bigger.
*/
vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
if (!vpd_buf)
return NULL;

@ -397,6 +421,9 @@ retry_pg:
return NULL;
}
if (result > vpd_len) {
dev_warn_once(&sdev->sdev_gendev,
"%s: VPD page 0x%02x result %d > %d bytes\n",
__func__, page, result, vpd_len);
vpd_len = result;
kfree(vpd_buf);
goto retry_pg;

@ -456,6 +483,12 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
if (vpd_buf->data[i] == 0x89)
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
if (vpd_buf->data[i] == 0xb0)
scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
if (vpd_buf->data[i] == 0xb1)
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
if (vpd_buf->data[i] == 0xb2)
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
}
kfree(vpd_buf);
}

@ -476,21 +509,30 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
int result;
int result, request_len;

if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
return -EINVAL;

/* RSOC header + size of command we are asking about */
request_len = 4 + COMMAND_SIZE(opcode);
if (request_len > len) {
dev_warn_once(&sdev->sdev_gendev,
"%s: len %u bytes, opcode 0x%02x needs %u\n",
__func__, len, opcode, request_len);
return -EINVAL;
}

memset(cmd, 0, 16);
cmd[0] = MAINTENANCE_IN;
cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
cmd[2] = 1; /* One command format */
cmd[3] = opcode;
put_unaligned_be32(len, &cmd[6]);
put_unaligned_be32(request_len, &cmd[6]);
memset(buffer, 0, len);

result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
&sshdr, 30 * HZ, 3, NULL);
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
request_len, &sshdr, 30 * HZ, 3, NULL);

if (result < 0)
return result;
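Editor's note: scsi_report_opcode() now requests only the 4-byte RSOC header plus the size of the one CDB being queried (`4 + COMMAND_SIZE(opcode)`), rather than the caller's whole buffer. A runnable userspace sketch of the length computation, with an illustrative stand-in for the kernel's COMMAND_SIZE() lookup:

    #include <stdio.h>

    /*
     * Illustrative stand-in for COMMAND_SIZE(): SCSI CDB length is
     * derived from the opcode's group code (top 3 bits).
     */
    static int command_size(unsigned char opcode)
    {
            static const int group_len[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

            return group_len[opcode >> 5];
    }

    int main(void)
    {
            unsigned char opcode = 0x93;    /* WRITE SAME(16), group 4 */

            /* RSOC header + size of the one command we are asking about. */
            printf("request_len = %d\n", 4 + command_size(opcode));  /* 20 */
            return 0;
    }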

@ -16,7 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

@ -98,6 +98,7 @@ static const char *sdebug_version_date = "20210520";
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */

@ -251,9 +252,11 @@ static const char *sdebug_version_date = "20210520";

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZONE_TYPE_CNV = 0x1,
ZBC_ZONE_TYPE_SWR = 0x2,
ZBC_ZONE_TYPE_SWP = 0x3,
ZBC_ZTYPE_CNV = 0x1,
ZBC_ZTYPE_SWR = 0x2,
ZBC_ZTYPE_SWP = 0x3,
/* ZBC_ZTYPE_SOBR = 0x4, */
ZBC_ZTYPE_GAP = 0x5,
};

/* enumeration names taken from table 26, zbcr05 */

@ -291,10 +294,12 @@ struct sdebug_dev_info {

/* For ZBC devices */
enum blk_zoned_model zmodel;
unsigned int zcap;
unsigned int zsize;
unsigned int zsize_shift;
unsigned int nr_zones;
unsigned int nr_conv_zones;
unsigned int nr_seq_zones;
unsigned int nr_imp_open;
unsigned int nr_exp_open;
unsigned int nr_closed;

@ -829,6 +834,7 @@ static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

@ -1559,6 +1565,12 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
put_unaligned_be32(devip->max_open, &arr[12]);
else
put_unaligned_be32(0xffffffff, &arr[12]);
if (devip->zcap < devip->zsize) {
arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
put_unaligned_be64(devip->zsize, &arr[20]);
} else {
arr[19] = 0;
}
return 0x3c;
}

@ -2711,12 +2723,38 @@ static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
unsigned long long lba)
{
return &devip->zstate[lba >> devip->zsize_shift];
u32 zno = lba >> devip->zsize_shift;
struct sdeb_zone_state *zsp;

if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
return &devip->zstate[zno];

/*
* If the zone capacity is less than the zone size, adjust for gap
* zones.
*/
zno = 2 * zno - devip->nr_conv_zones;
WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
zsp = &devip->zstate[zno];
if (lba >= zsp->z_start + zsp->z_size)
zsp++;
WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
return zsp;
}

static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
return zsp->z_type == ZBC_ZONE_TYPE_CNV;
return zsp->z_type == ZBC_ZTYPE_CNV;
}

static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
return zsp->z_type == ZBC_ZTYPE_GAP;
}

static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
}

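Editor's note: with gap zones, every sequential zone in the zstate array is followed by its gap zone, so zbc_zone() above maps the naive zone number to an array index with zno = 2 * zno - nr_conv_zones, then steps forward if the LBA lies past the sequential zone's capacity. A runnable sketch of the index arithmetic under a hypothetical geometry:

    #include <stdio.h>

    /* Hypothetical geometry: zone size 128 LBAs, 1 conventional zone. */
    #define ZSIZE_SHIFT     7
    #define NR_CONV         1

    /*
     * Map an LBA to its index in a zone-state array where each sequential
     * zone is followed by a gap zone, as in the zbc_zone() hunk above.
     */
    static unsigned int zone_index(unsigned long long lba)
    {
            unsigned int zno = lba >> ZSIZE_SHIFT;

            if (zno < NR_CONV)
                    return zno;             /* conventional zones are not split */
            return 2 * zno - NR_CONV;       /* seq zone; index + 1 is its gap */
    }

    int main(void)
    {
            printf("LBA 0   -> zstate[%u]\n", zone_index(0));    /* conv: 0 */
            printf("LBA 128 -> zstate[%u]\n", zone_index(128));  /* seq:  1 */
            printf("LBA 256 -> zstate[%u]\n", zone_index(256));  /* seq:  3 */
            return 0;
    }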
static void zbc_close_zone(struct sdebug_dev_info *devip,

@ -2724,7 +2762,7 @@ static void zbc_close_zone(struct sdebug_dev_info *devip,
{
enum sdebug_z_cond zc;

if (zbc_zone_is_conv(zsp))
if (!zbc_zone_is_seq(zsp))
return;

zc = zsp->z_cond;

@ -2762,7 +2800,7 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
{
enum sdebug_z_cond zc;

if (zbc_zone_is_conv(zsp))
if (!zbc_zone_is_seq(zsp))
return;

zc = zsp->z_cond;

@ -2794,10 +2832,10 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

if (zbc_zone_is_conv(zsp))
if (!zbc_zone_is_seq(zsp))
return;

if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
if (zsp->z_type == ZBC_ZTYPE_SWR) {
zsp->z_wp += num;
if (zsp->z_wp >= zend)
zsp->z_cond = ZC5_FULL;

@ -2842,9 +2880,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
if (devip->zmodel == BLK_ZONED_HA)
return 0;
/* For host-managed, reads cannot cross zone types boundaries */
if (zsp_end != zsp &&
zbc_zone_is_conv(zsp) &&
!zbc_zone_is_conv(zsp_end)) {
if (zsp->z_type != zsp_end->z_type) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
LBA_OUT_OF_RANGE,
READ_INVDATA_ASCQ);

@ -2853,6 +2889,13 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
return 0;
}

/* Writing into a gap zone is not allowed */
if (zbc_zone_is_gap(zsp)) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
ATTEMPT_ACCESS_GAP);
return check_condition_result;
}

/* No restrictions for writes within conventional zones */
if (zbc_zone_is_conv(zsp)) {
if (!zbc_zone_is_conv(zsp_end)) {

@ -2864,7 +2907,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
return 0;
}

if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
if (zsp->z_type == ZBC_ZTYPE_SWR) {
/* Writes cannot cross sequential zone boundaries */
if (zsp_end != zsp) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,

@ -4404,18 +4447,18 @@ cleanup:

#define RZONES_DESC_HD 64

/* Report zones depending on start LBA nad reporting options */
/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
unsigned int i, max_zones, rep_max_zones, nrz = 0;
unsigned int rep_max_zones, nrz = 0;
int ret = 0;
u32 alloc_len, rep_opts, rep_len;
bool partial;
u64 lba, zs_lba;
u8 *arr = NULL, *desc;
u8 *cmd = scp->cmnd;
struct sdeb_zone_state *zsp;
struct sdeb_zone_state *zsp = NULL;
struct sdeb_store_info *sip = devip2sip(devip, false);

if (!sdebug_dev_is_zoned(devip)) {

@ -4434,9 +4477,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
return check_condition_result;
}

max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
max_zones);
rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);

arr = kzalloc(alloc_len, GFP_ATOMIC);
if (!arr) {

@ -4448,9 +4489,9 @@ static int resp_report_zones(struct scsi_cmnd *scp,
sdeb_read_lock(sip);

desc = arr + 64;
for (i = 0; i < max_zones; i++) {
lba = zs_lba + devip->zsize * i;
if (lba > sdebug_capacity)
for (lba = zs_lba; lba < sdebug_capacity;
lba = zsp->z_start + zsp->z_size) {
if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
break;
zsp = zbc_zone(devip, lba);
switch (rep_opts) {

@ -4495,9 +4536,14 @@ static int resp_report_zones(struct scsi_cmnd *scp,
if (!zsp->z_non_seq_resource)
continue;
break;
case 0x3e:
/* All zones except gap zones. */
if (zbc_zone_is_gap(zsp))
continue;
break;
case 0x3f:
/* Not write pointer (conventional) zones */
if (!zbc_zone_is_conv(zsp))
if (zbc_zone_is_seq(zsp))
continue;
break;
default:

@ -4526,8 +4572,13 @@ static int resp_report_zones(struct scsi_cmnd *scp,
}

/* Report header */
/* Zone list length. */
put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
/* Maximum LBA */
put_unaligned_be64(sdebug_capacity - 1, arr + 8);
/* Zone starting LBA granularity. */
if (devip->zcap < devip->zsize)
put_unaligned_be64(devip->zsize, arr + 16);

rep_len = (unsigned long)desc - (unsigned long)arr;
ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));

@ -4752,7 +4803,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
enum sdebug_z_cond zc;
struct sdeb_store_info *sip = devip2sip(devip, false);

if (zbc_zone_is_conv(zsp))
if (!zbc_zone_is_seq(zsp))
return;

zc = zsp->z_cond;

@ -4942,6 +4993,7 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
struct sdeb_zone_state *zsp;
sector_t capacity = get_sdebug_capacity();
sector_t conv_capacity;
sector_t zstart = 0;
unsigned int i;

@ -4976,11 +5028,30 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
devip->zsize_shift = ilog2(devip->zsize);
devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

if (sdeb_zbc_nr_conv >= devip->nr_zones) {
if (sdeb_zbc_zone_cap_mb == 0) {
devip->zcap = devip->zsize;
} else {
devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
ilog2(sdebug_sector_size);
if (devip->zcap > devip->zsize) {
pr_err("Zone capacity too large\n");
return -EINVAL;
}
}

conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
if (conv_capacity >= capacity) {
pr_err("Number of conventional zones too large\n");
return -EINVAL;
}
devip->nr_conv_zones = sdeb_zbc_nr_conv;
devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
devip->zsize_shift;
devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

/* Add gap zones if zone capacity is smaller than the zone size */
if (devip->zcap < devip->zsize)
devip->nr_zones += devip->nr_seq_zones;

if (devip->zmodel == BLK_ZONED_HM) {
/* zbc_max_open_zones can be 0, meaning "not reported" */

@ -5001,23 +5072,29 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
zsp->z_start = zstart;

if (i < devip->nr_conv_zones) {
zsp->z_type = ZBC_ZONE_TYPE_CNV;
zsp->z_type = ZBC_ZTYPE_CNV;
zsp->z_cond = ZBC_NOT_WRITE_POINTER;
zsp->z_wp = (sector_t)-1;
} else {
zsp->z_size =
min_t(u64, devip->zsize, capacity - zstart);
} else if ((zstart & (devip->zsize - 1)) == 0) {
if (devip->zmodel == BLK_ZONED_HM)
zsp->z_type = ZBC_ZONE_TYPE_SWR;
zsp->z_type = ZBC_ZTYPE_SWR;
else
zsp->z_type = ZBC_ZONE_TYPE_SWP;
zsp->z_type = ZBC_ZTYPE_SWP;
zsp->z_cond = ZC1_EMPTY;
zsp->z_wp = zsp->z_start;
zsp->z_size =
min_t(u64, devip->zcap, capacity - zstart);
} else {
zsp->z_type = ZBC_ZTYPE_GAP;
zsp->z_cond = ZBC_NOT_WRITE_POINTER;
zsp->z_wp = (sector_t)-1;
zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
capacity - zstart);
}

if (zsp->z_start + devip->zsize < capacity)
zsp->z_size = devip->zsize;
else
zsp->z_size = capacity - zsp->z_start;

WARN_ON_ONCE((int)zsp->z_size <= 0);
zstart += zsp->z_size;
}
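Editor's note: the create-zones hunk derives the layout from module parameters: conventional capacity first, then sequential zones aligned to the zone size, and nr_zones doubled by one gap zone per sequential zone when zcap < zsize. A runnable sketch of the arithmetic with hypothetical sizes:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long long capacity = 1024;     /* hypothetical, in LBAs */
            unsigned long long zsize = 128, zcap = 96, nr_conv = 1;
            unsigned long long conv_capacity = nr_conv * zsize;
            unsigned long long nr_seq, nr_zones;

            nr_seq = ALIGN_UP(capacity - conv_capacity, zsize) / zsize;
            nr_zones = nr_conv + nr_seq;
            if (zcap < zsize)       /* one gap zone per sequential zone */
                    nr_zones += nr_seq;

            /* Prints: conv=1 seq=7 total=15 zones */
            printf("conv=%llu seq=%llu total=%llu zones\n",
                   nr_conv, nr_seq, nr_zones);
            return 0;
    }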

@ -5779,6 +5856,7 @@ module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);

@ -5850,6 +5928,7 @@ MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique de
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
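Editor's note: with the new zone_cap_mb parameter, a gap-zone layout can presumably be exercised with something like `modprobe scsi_debug zbc=managed zone_size_mb=4 zone_cap_mb=3 zone_nr_conv=1` (parameter names taken from the hunk above; the values are illustrative), leaving a 1 MiB gap zone after each sequential zone.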

@ -1977,7 +1977,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->nr_maps = shost->nr_maps ? : 1;
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = NUMA_NO_NODE;
tag_set->numa_node = dev_to_node(shost->dma_dev);
tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);

@ -733,7 +733,17 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
else if (sdev->inquiry_len)
/*
* LLD specified a maximum sdev->inquiry_len
* but device claims it has more data. Capping
* the length only makes sense for legacy
* devices. If a device supports SPC-4 (2014)
* or newer, assume that it is safe to ask for
* as much as the device says it supports.
*/
else if (sdev->inquiry_len &&
response_len > sdev->inquiry_len &&
(inq_result[2] & 0x7) < 6) /* SPC-4 */
next_inquiry_len = sdev->inquiry_len;
else
next_inquiry_len = response_len;

@ -448,6 +448,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
struct module *mod;

@ -490,6 +491,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);

if (vpd_pg0)

@ -500,6 +507,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree_rcu(vpd_pg80, rcu);
if (vpd_pg89)
kfree_rcu(vpd_pg89, rcu);
if (vpd_pgb0)
kfree_rcu(vpd_pgb0, rcu);
if (vpd_pgb1)
kfree_rcu(vpd_pgb1, rcu);
if (vpd_pgb2)
kfree_rcu(vpd_pgb2, rcu);
kfree(sdev->inquiry);
kfree(sdev);

@ -913,6 +926,9 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
sdev_vpd_pg_attr(pg89);
sdev_vpd_pg_attr(pgb0);
sdev_vpd_pg_attr(pgb1);
sdev_vpd_pg_attr(pgb2);
sdev_vpd_pg_attr(pg0);

static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,

@ -1250,6 +1266,15 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
return 0;

if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0)
return 0;

if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1)
return 0;

if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
return 0;

return S_IRUGO;
}

@ -1296,6 +1321,9 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
&dev_attr_vpd_pg89,
&dev_attr_vpd_pgb0,
&dev_attr_vpd_pgb1,
&dev_attr_vpd_pgb2,
&dev_attr_inquiry,
NULL
};
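Editor's note: the hunks above export the newly cached pages through sysfs bin attributes. A minimal userspace sketch reading one of them (the device path is hypothetical; the attribute name vpd_pgb0 matches the bin_attribute added above, following the /sys/class/scsi_device/*/device/ layout):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char page[255];
            ssize_t n;
            int fd;

            /* Hypothetical device 0:0:0:0; attribute absent if the
             * device does not supply VPD page 0xb0. */
            fd = open("/sys/class/scsi_device/0:0:0:0/device/vpd_pgb0",
                      O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, page, sizeof(page));
            if (n > 0)
                    printf("read %zd bytes of VPD page 0xb0\n", n);
            close(fd);
            return 0;
    }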

@ -2174,40 +2174,48 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
{
struct scsi_device *sdp = sdkp->device;
u8 type;
int ret = 0;

if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
sdkp->protection_type = 0;
return ret;
return 0;
}

type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */

if (type > T10_PI_TYPE3_PROTECTION)
ret = -ENODEV;
else if (scsi_host_dif_capable(sdp->host, type))
ret = 1;

if (sdkp->first_scan || type != sdkp->protection_type)
switch (ret) {
case -ENODEV:
if (type > T10_PI_TYPE3_PROTECTION) {
sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
" protection type %u. Disabling disk!\n",
type);
break;
case 1:
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIF Type %u protection\n", type);
break;
case 0:
sd_printk(KERN_NOTICE, sdkp,
"Disabling DIF Type %u protection\n", type);
break;
sdkp->protection_type = 0;
return -ENODEV;
}

sdkp->protection_type = type;

return ret;
return 0;
}

static void sd_config_protection(struct scsi_disk *sdkp)
{
struct scsi_device *sdp = sdkp->device;

if (!sdkp->first_scan)
return;

sd_dif_config_host(sdkp);

if (!sdkp->protection_type)
return;

if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
sd_printk(KERN_NOTICE, sdkp,
"Disabling DIF Type %u protection\n",
sdkp->protection_type);
sdkp->protection_type = 0;
}

sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
sdkp->protection_type);
}

static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,

@ -2841,40 +2849,37 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
struct scsi_vpd *vpd;

if (!buffer ||
/* Block Limits VPD */
scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
rcu_read_lock();

vpd = rcu_dereference(sdkp->device->vpd_pgb0);
if (!vpd || vpd->len < 16)
goto out;

blk_queue_io_min(sdkp->disk->queue,
get_unaligned_be16(&buffer[6]) * sector_sz);
sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);

sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);

if (buffer[3] == 0x3c) {
if (vpd->len >= 64) {
unsigned int lba_count, desc_count;

sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);

if (!sdkp->lbpme)
goto out;

lba_count = get_unaligned_be32(&buffer[20]);
desc_count = get_unaligned_be32(&buffer[24]);
lba_count = get_unaligned_be32(&vpd->data[20]);
desc_count = get_unaligned_be32(&vpd->data[24]);

if (lba_count && desc_count)
sdkp->max_unmap_blocks = lba_count;

sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);

if (buffer[32] & 0x80)
if (vpd->data[32] & 0x80)
sdkp->unmap_alignment =
get_unaligned_be32(&buffer[32]) & ~(1 << 31);
get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);

if (!sdkp->lbpvpd) { /* LBP VPD page not provided */

@ -2896,7 +2901,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
}

out:
kfree(buffer);
rcu_read_unlock();
}

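Editor's note: sd_read_block_limits() now reads the cached page; the fields it consumes sit at fixed offsets in the Block Limits page: a 16-bit granularity at bytes 6-7 and 32-bit maximum/optimal transfer lengths at bytes 8-11 and 12-15. A runnable userspace sketch of the same offsets against a hypothetical page image:

    #include <stdio.h>

    static unsigned int be16(const unsigned char *p)
    {
            return (p[0] << 8) | p[1];
    }

    static unsigned long be32(const unsigned char *p)
    {
            return ((unsigned long)p[0] << 24) | ((unsigned long)p[1] << 16) |
                   ((unsigned long)p[2] << 8) | p[3];
    }

    int main(void)
    {
            /* Hypothetical Block Limits (0xb0) page image, 64 bytes. */
            unsigned char vpd[64] = { 0 };

            vpd[7] = 0x08;          /* min granularity = 8 blocks    */
            vpd[10] = 0x10;         /* max transfer = 4096 blocks    */
            vpd[14] = 0x04;         /* opt transfer = 1024 blocks    */

            /* Same offsets as the vpd->data[] accesses in the hunk above. */
            printf("min_xfer_blocks = %u\n", be16(&vpd[6]));
            printf("max_xfer_blocks = %lu\n", be32(&vpd[8]));
            printf("opt_xfer_blocks = %lu\n", be32(&vpd[12]));
            return 0;
    }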
/**

@ -2906,18 +2911,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
unsigned char *buffer;
struct scsi_vpd *vpd;
u16 rot;
const int vpd_len = 64;
u8 zoned;

buffer = kmalloc(vpd_len, GFP_KERNEL);
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb1);

if (!buffer ||
/* Block Device Characteristics VPD */
scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
goto out;
if (!vpd || vpd->len < 8) {
rcu_read_unlock();
return;
}

rot = get_unaligned_be16(&buffer[4]);
rot = get_unaligned_be16(&vpd->data[4]);
zoned = (vpd->data[8] >> 4) & 3;
rcu_read_unlock();

if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

@ -2928,7 +2936,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
/* Host-managed */
blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
sdkp->zoned = (buffer[8] >> 4) & 3;
sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
/* Host-aware */
blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);

@ -2939,7 +2947,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}

if (!sdkp->first_scan)
goto out;
return;

if (blk_queue_is_zoned(q)) {
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",

@ -2952,9 +2960,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
sd_printk(KERN_NOTICE, sdkp,
"Drive-managed SMR disk\n");
}

out:
kfree(buffer);
}

/**

@ -2963,24 +2968,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
*/
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
unsigned char *buffer;
const int vpd_len = 8;
struct scsi_vpd *vpd;

if (sdkp->lbpme == 0)
return;

buffer = kmalloc(vpd_len, GFP_KERNEL);
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb2);

if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
goto out;
if (!vpd || vpd->len < 8) {
rcu_read_unlock();
return;
}

sdkp->lbpvpd = 1;
sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */

out:
kfree(buffer);
sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
rcu_read_unlock();
}

static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)

@ -2994,8 +2999,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
}

if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
/* too large values might cause issues with arcmsr */
int vpd_buf_len = 64;
struct scsi_vpd *vpd;

sdev->no_report_opcodes = 1;

@ -3003,8 +3007,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
* CODES is unsupported and the device has an ATA
* Information VPD page (SAT).
*/
if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
rcu_read_lock();
vpd = rcu_dereference(sdev->vpd_pg89);
if (vpd)
sdev->no_write_same = 1;
rcu_read_unlock();
}

if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)

@ -3108,6 +3115,29 @@ out:
kfree(buffer);
}

static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
{
struct scsi_device *sdp = sdkp->device;
unsigned int min_xfer_bytes =
logical_to_bytes(sdp, sdkp->min_xfer_blocks);

if (sdkp->min_xfer_blocks == 0)
return false;

if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
sd_first_printk(KERN_WARNING, sdkp,
"Preferred minimum I/O size %u bytes not a " \
"multiple of physical block size (%u bytes)\n",
min_xfer_bytes, sdkp->physical_block_size);
sdkp->min_xfer_blocks = 0;
return false;
}

sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
min_xfer_bytes);
return true;
}

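Editor's note: sd_validate_min_xfer_size() above rejects a preferred minimum that is not a multiple of the physical block size; because block sizes are powers of two, the test is a bitmask rather than a modulo. A runnable sketch of that check:

    #include <stdio.h>

    /* Power-of-two multiples can be tested with a mask, as in the hunk. */
    static int is_multiple_pow2(unsigned int bytes, unsigned int block)
    {
            return (bytes & (block - 1)) == 0;
    }

    int main(void)
    {
            unsigned int phys = 4096;       /* hypothetical physical block */

            printf("8192 ok? %d\n", is_multiple_pow2(8192, phys));  /* 1 */
            printf("6144 ok? %d\n", is_multiple_pow2(6144, phys));  /* 0 */
            return 0;
    }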
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a

@ -3119,6 +3149,8 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
struct scsi_device *sdp = sdkp->device;
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
unsigned int min_xfer_bytes =
logical_to_bytes(sdp, sdkp->min_xfer_blocks);

if (sdkp->opt_xfer_blocks == 0)
return false;

@ -3147,6 +3179,15 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return false;
}

if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes not a " \
"multiple of preferred minimum block " \
"size (%u bytes)\n",
opt_xfer_bytes, min_xfer_bytes);
return false;
}

if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes not a " \

@ -3224,6 +3265,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
sd_config_protection(sdkp);
}

/*

@ -3239,6 +3281,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);

if (sd_validate_min_xfer_size(sdkp))
blk_queue_io_min(sdkp->disk->queue,
logical_to_bytes(sdp, sdkp->min_xfer_blocks));
else
blk_queue_io_min(sdkp->disk->queue, 0);

if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);

@ -3477,11 +3525,6 @@ static int sd_probe(struct device *dev)
goto out;
}

if (sdkp->capacity)
sd_dif_config_host(sdkp);

sd_revalidate_disk(gd);

if (sdkp->security) {
sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
if (sdkp->opal_dev)

@ -67,6 +67,20 @@ enum {
SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */
};

/**
* struct zoned_disk_info - Specific properties of a ZBC SCSI device.
* @nr_zones: number of zones.
* @zone_blocks: number of logical blocks per zone.
*
* This data structure holds the ZBC SCSI device properties that are retrieved
* twice: a first time before the gendisk capacity is known and a second time
* after the gendisk capacity is known.
*/
struct zoned_disk_info {
u32 nr_zones;
u32 zone_blocks;
};

struct scsi_disk {
struct scsi_device *device;

@ -78,13 +92,18 @@ struct scsi_disk {
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
u32 rev_nr_zones;
u32 zone_blocks;
u32 rev_zone_blocks;
/* Updated during revalidation before the gendisk capacity is known. */
struct zoned_disk_info early_zone_info;
/* Updated during revalidation after the gendisk capacity is known. */
struct zoned_disk_info zone_info;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
/*
* Either zero or a power of two. If not zero it means that the offset
* between zone starting LBAs is constant.
*/
u32 zone_starting_lba_gran;
u32 *zones_wp_offset;
spinlock_t zones_wp_offset_lock;
u32 *rev_wp_offset;

@ -95,6 +114,7 @@ struct scsi_disk {
atomic_t openers;
sector_t capacity; /* size in logical blocks */
int max_retries;
u32 min_xfer_blocks;
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;

@ -222,7 +242,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED

void sd_zbc_release_disk(struct scsi_disk *sdkp);
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);

@ -238,8 +258,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,

static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}

static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
unsigned char *buf)
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
return 0;
}

@ -59,8 +59,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.profile = &t10_pi_type1_crc;

bi.tuple_size = sizeof(struct t10_pi_tuple);
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s protection\n", bi.profile->name);

if (dif && type) {
bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;

@ -72,11 +70,11 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.tag_size = sizeof(u16) + sizeof(u32);
else
bi.tag_size = sizeof(u16);

sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
bi.tag_size);
}

sd_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s, application tag size %u bytes\n",
bi.profile->name, bi.tag_size);
out:
blk_integrity_register(disk, &bi);
}

@ -20,6 +20,12 @@

#include "sd.h"

/**
* sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
* @zone: Zone for which to return the write pointer offset.
*
* Return: offset of the write pointer from the start of the zone.
*/
static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
{
if (zone->type == ZBC_ZONE_TYPE_CONV)

@ -44,13 +50,37 @@ static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
}
}

static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
/* Whether or not a SCSI zone descriptor describes a gap zone. */
static bool sd_zbc_is_gap_zone(const u8 buf[64])
{
return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
}

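Editor's note: sd_zbc_is_gap_zone() keys off the low nibble of byte 0 of a 64-byte REPORT ZONES descriptor; zone length and start LBA follow as big-endian 64-bit values at offsets 8 and 16 (used further down in this file). A runnable userspace sketch of that descriptor parsing (hypothetical descriptor contents):

    #include <stdio.h>

    #define ZT_GAP  0x5     /* gap zone type, per the ZBC_ZONE_TYPE_GAP check */

    static unsigned long long be64(const unsigned char *p)
    {
            unsigned long long v = 0;
            int i;

            for (i = 0; i < 8; i++)
                    v = (v << 8) | p[i];
            return v;
    }

    int main(void)
    {
            /* Hypothetical 64-byte zone descriptor. */
            unsigned char desc[64] = { 0 };

            desc[0] = 0x05;         /* type nibble: gap zone */
            desc[15] = 0x80;        /* zone length = 128 LBAs */
            desc[23] = 0x80;        /* start LBA = 128        */

            printf("gap zone? %d\n", (desc[0] & 0xf) == ZT_GAP);
            printf("len = %llu, start = %llu\n",
                   be64(&desc[8]), be64(&desc[16]));
            return 0;
    }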
/**
* sd_zbc_parse_report - Parse a SCSI zone descriptor
* @sdkp: SCSI disk pointer.
* @buf: SCSI zone descriptor.
* @idx: Index of the zone relative to the first zone reported by the current
* sd_zbc_report_zones() call.
* @cb: Callback function pointer.
* @data: Second argument passed to @cb.
*
* Return: Value returned by @cb.
*
* Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
* call @cb(blk_zone, @data).
*/
static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
struct blk_zone zone = { 0 };
sector_t start_lba, gran;
int ret;

if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
return -EINVAL;

zone.type = buf[0] & 0x0f;
zone.cond = (buf[1] >> 4) & 0xf;
if (buf[1] & 0x01)

@ -58,9 +88,27 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
if (buf[1] & 0x02)
zone.non_seq = 1;

zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone.capacity = zone.len;
zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
start_lba = get_unaligned_be64(&buf[16]);
zone.start = logical_to_sectors(sdp, start_lba);
zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone.len = zone.capacity;
if (sdkp->zone_starting_lba_gran) {
gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
if (zone.len > gran) {
sd_printk(KERN_ERR, sdkp,
"Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
start_lba,
sectors_to_logical(sdp, zone.capacity),
sectors_to_logical(sdp, zone.len),
sectors_to_logical(sdp, gran));
return -EINVAL;
}
/*
* Use the starting LBA granularity instead of the zone length
* obtained from the REPORT ZONES command.
*/
zone.len = gran;
}
if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
else

@ -161,7 +209,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
nr_zones = min(nr_zones, sdkp->nr_zones);
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);

@ -186,16 +234,28 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*/
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}

/**
* sd_zbc_report_zones - SCSI .report_zones() callback.
* @disk: Disk to report zones for.
* @sector: Start sector.
* @nr_zones: Maximum number of zones to report.
* @cb: Callback function called to report zone information.
* @data: Second argument passed to @cb.
*
* Called by the block layer to iterate over zone information. See also the
* disk->fops->report_zones() calls in block/blk-zoned.c.
*/
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
sector_t lba = sectors_to_logical(sdkp->device, sector);
unsigned int nr, i;
unsigned char *buf;
u64 zone_length, start_lba;
size_t offset, buflen = 0;
int zone_idx = 0;
int ret;

@ -204,7 +264,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
/* Not a zoned device */
return -EOPNOTSUPP;

if (!capacity)
if (!sdkp->capacity)
/* Device gone or invalid */
return -ENODEV;

@ -212,9 +272,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
if (!buf)
return -ENOMEM;

while (zone_idx < nr_zones && sector < capacity) {
ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
sectors_to_logical(sdkp->device, sector), true);
while (zone_idx < nr_zones && lba < sdkp->capacity) {
ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
if (ret)
goto out;

@ -225,14 +284,36 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,

for (i = 0; i < nr && zone_idx < nr_zones; i++) {
offset += 64;
start_lba = get_unaligned_be64(&buf[offset + 16]);
zone_length = get_unaligned_be64(&buf[offset + 8]);
if ((zone_idx == 0 &&
(lba < start_lba ||
lba >= start_lba + zone_length)) ||
(zone_idx > 0 && start_lba != lba) ||
start_lba + zone_length < start_lba) {
sd_printk(KERN_ERR, sdkp,
"Zone %d at LBA %llu is invalid: %llu + %llu\n",
zone_idx, lba, start_lba, zone_length);
ret = -EINVAL;
goto out;
}
lba = start_lba + zone_length;
if (sd_zbc_is_gap_zone(&buf[offset])) {
if (sdkp->zone_starting_lba_gran)
continue;
sd_printk(KERN_ERR, sdkp,
"Gap zone without constant LBA offsets\n");
ret = -EINVAL;
goto out;
}

ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
cb, data);
if (ret)
goto out;

zone_idx++;
}

sector += sd_zbc_zone_sectors(sdkp) * i;
}

ret = zone_idx;

@ -276,6 +357,10 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
return 0;
}

/*
* An attempt to append a zone triggered an invalid write pointer error.
* Reread the write pointer of the zone(s) in which the append failed.
*/
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
struct scsi_disk *sdkp;

@ -286,14 +371,14 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
for (zno = 0; zno < sdkp->nr_zones; zno++) {
for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
continue;

spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
SD_BUF_SIZE,
zno * sdkp->zone_blocks, true);
zno * sdkp->zone_info.zone_blocks, true);
spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
if (!ret)
sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,

@ -360,7 +445,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
break;
default:
wp_offset = sectors_to_logical(sdkp->device, wp_offset);
if (wp_offset + nr_blocks > sdkp->zone_blocks) {
if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
ret = BLK_STS_IOERR;
break;
}

@ -489,7 +574,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
break;
case REQ_OP_ZONE_RESET_ALL:
memset(sdkp->zones_wp_offset, 0,
sdkp->nr_zones * sizeof(unsigned int));
sdkp->zone_info.nr_zones * sizeof(unsigned int));
break;
default:
break;

@ -545,6 +630,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
unsigned char *buf)
{
u64 zone_starting_lba_gran;

if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,

@ -558,12 +644,36 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
sdkp->zones_max_open = 0;
} else {
return 0;
}

/* Host-managed */
sdkp->urswrz = buf[4] & 1;
sdkp->zones_optimal_open = 0;
sdkp->zones_optimal_nonseq = 0;
sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
/* Check zone alignment method */
switch (buf[23] & 0xf) {
case 0:
case ZBC_CONSTANT_ZONE_LENGTH:
/* Use zone length */
break;
case ZBC_CONSTANT_ZONE_START_OFFSET:
zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
if (zone_starting_lba_gran == 0 ||
!is_power_of_2(zone_starting_lba_gran) ||
logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
UINT_MAX) {
sd_printk(KERN_ERR, sdkp,
"Invalid zone starting LBA granularity %llu\n",
zone_starting_lba_gran);
return -ENODEV;
}
sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
break;
default:
sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
return -ENODEV;
}

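Editor's note: the zone-alignment hunk above accepts a constant zone starting LBA offset only when it is a non-zero power of two that fits the queue limits. A runnable sketch of the power-of-two predicate, matching the kernel's is_power_of_2() used above:

    #include <stdio.h>

    /* Same predicate as the kernel's is_power_of_2(). */
    static int is_power_of_2(unsigned long long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            unsigned long long grans[] = { 0, 1, 96, 128 };
            int i;

            for (i = 0; i < 4; i++)     /* prints 0, 1, 0, 1 */
                    printf("gran %llu valid? %d\n", grans[i],
                           is_power_of_2(grans[i]));
            return 0;
    }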
/*

@ -585,7 +695,7 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* sd_zbc_check_capacity - Check the device capacity
* @sdkp: Target disk
* @buf: command buffer
* @zblocks: zone size in number of blocks
* @zblocks: zone size in logical blocks
*
* Get the device zone size and check that the device capacity as reported
* by READ CAPACITY matches the max_lba value (plus one) of the report zones

@ -619,6 +729,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
}
}

if (sdkp->zone_starting_lba_gran == 0) {
/* Get the size of the first reported zone */
rec = buf + 64;
zone_blocks = get_unaligned_be64(&rec[8]);

@ -628,6 +739,16 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
"Zone size too large\n");
return -EFBIG;
}
} else {
zone_blocks = sdkp->zone_starting_lba_gran;
}

if (!is_power_of_2(zone_blocks)) {
sd_printk(KERN_ERR, sdkp,
"Zone size %llu is not a power of two.\n",
zone_blocks);
return -EINVAL;
}

*zblocks = zone_blocks;

@ -639,16 +760,16 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp)
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
return;

if (sdkp->capacity & (sdkp->zone_blocks - 1))
if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks + 1 runt zone\n",
sdkp->nr_zones - 1,
sdkp->zone_blocks);
sdkp->zone_info.nr_zones - 1,
sdkp->zone_info.zone_blocks);
else
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks\n",
sdkp->nr_zones,
sdkp->zone_blocks);
sdkp->zone_info.nr_zones,
sdkp->zone_info.zone_blocks);
}

static int sd_zbc_init_disk(struct scsi_disk *sdkp)

@ -675,10 +796,8 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
kfree(sdkp->zone_wp_update_buf);
sdkp->zone_wp_update_buf = NULL;

sdkp->nr_zones = 0;
sdkp->rev_nr_zones = 0;
sdkp->zone_blocks = 0;
sdkp->rev_zone_blocks = 0;
sdkp->early_zone_info = (struct zoned_disk_info){ };
sdkp->zone_info = (struct zoned_disk_info){ };

mutex_unlock(&sdkp->rev_mutex);
}

@ -696,12 +815,17 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}

/*
* Call blk_revalidate_disk_zones() if any of the zoned disk properties have
* changed that make it necessary to call that function. Called by
* sd_revalidate_disk() after the gendisk capacity has been set.
*/
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
u32 zone_blocks = sdkp->rev_zone_blocks;
unsigned int nr_zones = sdkp->rev_nr_zones;
u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
u32 max_append;
int ret = 0;
unsigned int flags;

@ -732,14 +856,14 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
*/
mutex_lock(&sdkp->rev_mutex);

if (sdkp->zone_blocks == zone_blocks &&
sdkp->nr_zones == nr_zones &&
if (sdkp->zone_info.zone_blocks == zone_blocks &&
sdkp->zone_info.nr_zones == nr_zones &&
disk->queue->nr_zones == nr_zones)
goto unlock;

flags = memalloc_noio_save();
sdkp->zone_blocks = zone_blocks;
sdkp->nr_zones = nr_zones;
sdkp->zone_info.zone_blocks = zone_blocks;
sdkp->zone_info.nr_zones = nr_zones;
sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
if (!sdkp->rev_wp_offset) {
ret = -ENOMEM;

@ -754,8 +878,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
sdkp->rev_wp_offset = NULL;

if (ret) {
sdkp->zone_blocks = 0;
sdkp->nr_zones = 0;
sdkp->zone_info = (struct zoned_disk_info){ };
sdkp->capacity = 0;
goto unlock;
}

@ -774,7 +897,16 @@ unlock:
return ret;
}

int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
/**
* sd_zbc_read_zones - Read zone information and update the request queue
* @sdkp: SCSI disk pointer.
* @buf: 512 byte buffer used for storing SCSI command output.
*
* Read zone information and update the request queue zone characteristics and
* also the zoned device information in *sdkp. Called by sd_revalidate_disk()
* before the gendisk capacity has been set.
*/
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;

@ -832,8 +964,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);

sdkp->rev_nr_zones = nr_zones;
sdkp->rev_zone_blocks = zone_blocks;
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;

return 0;
@@ -113,7 +113,7 @@ static int sr_open(struct cdrom_device_info *, int);
 static void sr_release(struct cdrom_device_info *);
 
 static void get_sectorsize(struct scsi_cd *);
-static void get_capabilities(struct scsi_cd *);
+static int get_capabilities(struct scsi_cd *);
 
 static unsigned int sr_check_events(struct cdrom_device_info *cdi,
 				    unsigned int clearing, int slot);
@@ -669,8 +669,9 @@ static int sr_probe(struct device *dev)
 
 	sdev->sector_size = 2048;	/* A guess, just in case */
 
-	/* FIXME: need to handle a get_capabilities failure properly ?? */
-	get_capabilities(cd);
+	error = -ENOMEM;
+	if (get_capabilities(cd))
+		goto fail_minor;
 	sr_vendor_init(cd);
 
 	set_capacity(disk, cd->capacity);
@@ -794,7 +795,7 @@ static void get_sectorsize(struct scsi_cd *cd)
 	return;
 }
 
-static void get_capabilities(struct scsi_cd *cd)
+static int get_capabilities(struct scsi_cd *cd)
 {
 	unsigned char *buffer;
 	struct scsi_mode_data data;
@@ -819,7 +820,7 @@ static void get_capabilities(struct scsi_cd *cd)
 	buffer = kmalloc(512, GFP_KERNEL);
 	if (!buffer) {
 		sr_printk(KERN_ERR, cd, "out of memory.\n");
-		return;
+		return -ENOMEM;
 	}
 
 	/* eat unit attentions */
@@ -839,7 +840,7 @@ static void get_capabilities(struct scsi_cd *cd)
 			     CDC_MRW | CDC_MRW_W | CDC_RAM);
 		kfree(buffer);
 		sr_printk(KERN_INFO, cd, "scsi-1 drive");
-		return;
+		return 0;
 	}
 
 	n = data.header_length + data.block_descriptor_length;
@@ -898,6 +899,7 @@ static void get_capabilities(struct scsi_cd *cd)
 	}
 
 	kfree(buffer);
+	return 0;
 }
 
 /*
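The sr hunks convert get_capabilities() from void to an errno-style int so a kmalloc() failure finally reaches sr_probe(), which now bails out through its fail_minor label instead of carrying on with a half-initialized drive (resolving the old FIXME). A small standalone sketch of the void-to-int conversion pattern; get_caps(), probe(), and the labels are illustrative names, not the driver code:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Before: a void helper could only log an allocation failure.
 * After: it returns 0 on success or a negative errno the caller acts on. */
static int get_caps(size_t len, char **out)
{
	char *buffer = malloc(len);

	if (!buffer)
		return -ENOMEM; /* was: log and plain "return;" */
	*out = buffer;
	return 0;
}

static int probe(void)
{
	char *buf;
	int error;

	error = get_caps(512, &buf);
	if (error)
		goto fail; /* mirrors the "goto fail_minor" in sr_probe() */
	/* ... use buf ... */
	free(buf);
	return 0;
fail:
	return error;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}
```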
@@ -1,36 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0+
 #
 # Kernel configuration file for the UFS Host Controller
 #
 # This code is based on drivers/scsi/ufs/Kconfig
 # Copyright (C) 2011-2013 Samsung India Software Operations
 #
 # Authors:
 #	Santosh Yaraganavi <santosh.sy@samsung.com>
 #	Vinayak Holikatti <h.vinayak@samsung.com>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-# See the COPYING file in the top-level directory or visit
-# <http://www.gnu.org/licenses/gpl-2.0.html>
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# This program is provided "AS IS" and "WITH ALL FAULTS" and
-# without warranty of any kind. You are solely responsible for
-# determining the appropriateness of using and distributing
-# the program and assume all risks associated with your exercise
-# of rights with respect to the program, including but not limited
-# to infringement of third party rights, the risks and costs of
-# program errors, damage to or loss of data, programs or equipment,
-# and unavailability or interruption of operations. Under no
-# circumstances will the contributor of this Program be liable for
-# any damages of any kind arising from your use or distribution of
-# this program.
 
 config SCSI_UFSHCD
 	tristate "Universal Flash Storage Controller Driver Core"
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/clk.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -340,4 +341,3 @@ module_platform_driver(cdns_ufs_pltfrm_driver);
 MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
 MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
@@ -11,6 +11,7 @@
 #include "ufshcd-dwc.h"
 #include "tc-dwc-g210.h"
 
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "ufshcd-pltfrm.h"
 #include "ufshcd-dwc.h"
@@ -7,6 +7,8 @@
  * Authors: Joao Pinto <jpinto@synopsys.com>
  */
 
+#include <linux/module.h>
+
 #include "ufshcd.h"
 #include "unipro.h"
 
@@ -10,6 +10,8 @@
 #ifndef _TC_DWC_G210_H
 #define _TC_DWC_G210_H
 
+struct ufs_hba;
+
 int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
 int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
 
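The header hunk above adds a `struct ufs_hba;` forward declaration so the header stands on its own: prototypes that only pass pointers need the tag name, not the full definition, so no extra #include (and no include-order dependency) is required. A tiny illustration of the idiom, with invented names:

```c
/* widget.h - illustrative header, not from the kernel tree */
#ifndef WIDGET_H
#define WIDGET_H

struct device;	/* forward declaration: layout unknown here,
		 * but pointers to it are perfectly usable */

int widget_attach(struct device *dev);

#endif /* WIDGET_H */
```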
@@ -29,11 +29,9 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
 		return PTR_ERR(regbase);
 
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(dev);
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
 		goto disable_pm;
-	}
 
 	/* Select MPHY refclk frequency */
 	clk = devm_clk_get(dev, NULL);
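The ti-j721e hunk swaps the open-coded pm_runtime_get_sync()/pm_runtime_put_noidle() error dance for pm_runtime_resume_and_get(), which performs the put itself when the resume fails. A simplified model of the helper's contract, assuming kernel context (the in-tree definition lives in the PM runtime headers; this sketch is not that source):

```c
#include <linux/pm_runtime.h>

/* Sketch of what pm_runtime_resume_and_get() guarantees: on failure the
 * usage count is dropped again, so callers no longer need put_noidle()
 * in their own error paths. */
static int resume_and_get_model(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* bumps usage count and resumes */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the bump on failure */
		return ret;
	}
	return 0;	/* positive "already active" results collapse to 0 */
}
```

Centralizing the put removes a class of usage-count leaks: every caller of the old pattern had to remember the put_noidle(), and many did not.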
@@ -5,6 +5,7 @@
 
 #include "ufs-debugfs.h"
 #include "ufshcd.h"
+#include "ufshcd-priv.h"
 
 static struct dentry *ufs_debugfs_root;
 
@@ -9,6 +9,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -704,7 +705,7 @@ static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
 
 	/* local unipro attributes */
 	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
 	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
 	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
 	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
@@ -1028,7 +1029,7 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
 
 	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
 		ufshcd_dme_set(hba,
-			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);
+			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);
 
 	if (attr->pa_granularity) {
 		exynos_ufs_enable_dbg_mode(hba);
@@ -248,22 +248,22 @@ long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long);
 
 static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba)
 {
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), TRUE);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), true);
 }
 
 static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba)
 {
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), FALSE);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), false);
 }
 
 static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba)
 {
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), TRUE);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), true);
 }
 
 static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
 {
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), false);
 }
 
 #endif /* _UFS_EXYNOS_H_ */
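The exynos changes replace the driver-local TRUE/FALSE macros with the lower-case true/false that the kernel already defines globally (and that C99 provides via <stdbool.h>), removing one more private spelling of a boolean. A trivial standalone illustration:

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* true/false come from <stdbool.h> (in the kernel, from its own
	 * stddef definitions), so ad-hoc "#define TRUE 1" macros can go. */
	bool enabled = true;

	printf("%d %d\n", enabled, false); /* prints "1 0" */
	return 0;
}
```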
@@ -7,6 +7,8 @@
  */
 
+#include <linux/time.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/dma-mapping.h>
@@ -8,6 +8,7 @@
 #include <linux/units.h>
 
 #include "ufshcd.h"
+#include "ufshcd-priv.h"
 
 struct ufs_hwmon_data {
 	struct ufs_hba *hba;
@@ -8,6 +8,9 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -19,7 +22,6 @@
 #include <linux/soc/mediatek/mtk_sip_svc.h>
 
 #include "ufshcd.h"
-#include "ufshcd-crypto.h"
 #include "ufshcd-pltfrm.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
@@ -44,12 +46,14 @@
 #define ufs_mtk_device_reset_ctrl(high, res) \
 	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
 
-static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
-	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
-	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
-		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
-	END_FIX
+static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
+	{ .wmanufacturerid = UFS_VENDOR_MICRON,
+	  .model = UFS_ANY_MODEL,
+	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
+	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+	  .model = "H9HQ21AFAMZDAR",
+	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
+	{}
 };
 
 static const struct of_device_id ufs_mtk_of_match[] = {
@@ -169,7 +173,6 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
 				     enum ufs_notify_change_status status)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-	unsigned long flags;
 
 	if (status == PRE_CHANGE) {
 		if (host->unipro_lpm) {
@@ -183,12 +186,8 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
 			ufs_mtk_crypto_enable(hba);
 
 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
-			spin_lock_irqsave(hba->host->host_lock, flags);
 			ufshcd_writel(hba, 0,
 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
-			spin_unlock_irqrestore(hba->host->host_lock,
-					       flags);
-
 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
 			hba->ahit = 0;
 		}
@@ -860,7 +859,6 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 
 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	u32 ah_ms;
 
 	if (ufshcd_is_clkgating_allowed(hba)) {
@@ -869,9 +867,7 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 					  hba->ahit);
 		else
 			ah_ms = 10;
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.delay_ms = ah_ms + 5;
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
 	}
 }
 
@@ -992,13 +988,10 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
 
 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	int ret;
 
 	/* disable auto-hibern8 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* wait host return to idle state when auto-hibern8 off */
 	ufs_mtk_wait_idle_state(hba, 5);
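The mediatek hunk drops the UFS_FIX()/END_FIX wrapper macros in favour of plain designated initializers terminated by an empty sentinel entry, the same table shape a quirk scan can walk without helper macros. A user-space sketch of the table-plus-sentinel pattern; the struct shape mirrors the diff, while the vendor IDs, macro names, and lookup loop are illustrative assumptions:

```c
#include <stdio.h>
#include <string.h>

struct dev_quirk {
	unsigned short wmanufacturerid; /* vendor ID; 0 terminates the table */
	const char *model;              /* NULL matches any model */
	unsigned int quirk;             /* quirk flags to apply */
};

#define VENDOR_EXAMPLE        0x12C /* illustrative value */
#define QUIRK_DELAY_AFTER_LPM 0x1

static const struct dev_quirk fixups[] = {
	{ .wmanufacturerid = VENDOR_EXAMPLE,
	  .model = NULL, /* any model */
	  .quirk = QUIRK_DELAY_AFTER_LPM },
	{ 0 } /* empty sentinel replaces the old END_FIX macro */
};

static unsigned int lookup_quirks(unsigned short vendor, const char *model)
{
	unsigned int quirks = 0;

	/* Walk until the sentinel's zero vendor ID stops the loop. */
	for (const struct dev_quirk *f = fixups; f->wmanufacturerid; f++)
		if (f->wmanufacturerid == vendor &&
		    (!f->model || !strcmp(f->model, model)))
			quirks |= f->quirk;
	return quirks;
}

int main(void)
{
	printf("quirks: %#x\n", lookup_quirks(VENDOR_EXAMPLE, "whatever"));
	return 0;
}
```

Designated initializers keep each entry readable on its own, and the `{}` sentinel makes the table extensible without touching a terminator macro.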