Merge branch '5.15/scsi-fixes' into 5.16/scsi-queue

The partial UFS revert in 5.15 is needed for some additional fixes in
the 5.16 SCSI tree. Merge the fixes branch.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 6266f7df38
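The merge itself is an ordinary Git merge of the fixes branch into the queue branch. A minimal sketch of the workflow, assuming the branch names from the subject line (the maintainer's exact invocation is not recorded here):

    git checkout 5.16/scsi-queue
    git merge 5.15/scsi-fixes    # conflict resolution produced merge commit 6266f7df38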
drivers/scsi/hosts.c
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
                         goto fail;
         }
 
-        shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+        /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+        shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
                                    shost->can_queue);
 
         error = scsi_init_sense_cache(shost);
drivers/scsi/ibmvfc/ibmvfc.c
@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
         spin_lock_irqsave(&evt->queue->l_lock, flags);
         list_add_tail(&evt->queue_list, &evt->queue->sent);
+        atomic_set(&evt->active, 1);
 
         mb();
 
@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
                             be64_to_cpu(crq_as_u64[1]));
 
         if (rc) {
+                atomic_set(&evt->active, 0);
                 list_del(&evt->queue_list);
                 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
                 del_timer(&evt->timer);
@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
                 evt->done(evt);
         } else {
-                atomic_set(&evt->active, 1);
                 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
                 ibmvfc_trc_start(evt);
         }
drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         shost->max_lun = -1;
         shost->unique_id = mrioc->id;
 
-        shost->max_channel = 1;
+        shost->max_channel = 0;
         shost->max_id = 0xFFFFFFFF;
 
         if (prot_mask >= 0)
drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5065,9 +5065,12 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
         if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
                 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 
-        if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
-                eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-                        MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+        if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+                eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+
+        if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+                eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
+
                 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
                         cpu_to_be32(scsi_prot_ref_tag(scmd));
         }
drivers/scsi/qla2xxx/qla_bsg.c
@@ -431,7 +431,7 @@ done_unmap_sg:
         goto done_free_fcport;
 
 done_free_fcport:
-        if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+        if (bsg_request->msgcode != FC_BSG_RPT_ELS)
                 qla2x00_free_fcport(fcport);
 done:
         return rval;
drivers/scsi/qla2xxx/qla_os.c
@@ -4151,7 +4151,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                         ql_dbg_pci(ql_dbg_init, ha->pdev,
                             0xe0ee, "%s: failed alloc dsd\n",
                             __func__);
-                        return 1;
+                        return -ENOMEM;
                 }
                 ha->dif_bundle_kallocs++;
 
drivers/scsi/qla2xxx/qla_target.c
@@ -3320,8 +3320,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                         "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
                         vha->flags.online, qla2x00_reset_active(vha),
                         cmd->reset_count, qpair->chip_reset);
-                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-                return 0;
+                goto out_unmap_unlock;
         }
 
         /* Does F/W have an IOCBs for this request */
@@ -3446,10 +3445,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
         prm.sg = NULL;
         prm.req_cnt = 1;
 
-        /* Calculate number of entries and segments required */
-        if (qlt_pci_map_calc_cnt(&prm) != 0)
-                return -EAGAIN;
-
         if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
             (cmd->sess && cmd->sess->deleted)) {
                 /*
@@ -3467,6 +3462,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                 return 0;
         }
 
+        /* Calculate number of entries and segments required */
+        if (qlt_pci_map_calc_cnt(&prm) != 0)
+                return -EAGAIN;
+
         spin_lock_irqsave(qpair->qp_lock_ptr, flags);
         /* Does F/W have an IOCBs for this request */
         res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3871,9 +3870,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 
         BUG_ON(cmd->cmd_in_wq);
 
-        if (cmd->sg_mapped)
-                qlt_unmap_sg(cmd->vha, cmd);
-
         if (!cmd->q_full)
                 qlt_decr_num_pend_cmds(cmd->vha);
 
drivers/scsi/scsi.c
@@ -545,8 +545,10 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-        module_put(sdev->host->hostt->module);
+        struct module *mod = sdev->host->hostt->module;
+
         put_device(&sdev->sdev_gendev);
+        module_put(mod);
 }
 EXPORT_SYMBOL(scsi_device_put);
 
drivers/scsi/scsi_sysfs.c
@@ -444,9 +444,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
         struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
         struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
         unsigned long flags;
+        struct module *mod;
 
         sdev = container_of(work, struct scsi_device, ew.work);
 
+        mod = sdev->host->hostt->module;
+
         scsi_dh_release_device(sdev);
 
         parent = sdev->sdev_gendev.parent;
@@ -497,11 +500,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 
         if (parent)
                 put_device(parent);
+        module_put(mod);
 }
 
 static void scsi_device_dev_release(struct device *dev)
 {
         struct scsi_device *sdp = to_scsi_device(dev);
+
+        /* Set module pointer as NULL in case of module unloading */
+        if (!try_module_get(sdp->host->hostt->module))
+                sdp->host->hostt->module = NULL;
+
         execute_in_process_context(scsi_device_dev_release_usercontext,
                                    &sdp->ew);
 }
drivers/scsi/scsi_transport_iscsi.c
@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                         session->recovery_tmo = value;
                 break;
         default:
-                err = transport->set_param(conn, ev->u.set_param.param,
-                                data, ev->u.set_param.len);
+                if ((conn->state == ISCSI_CONN_BOUND) ||
+                    (conn->state == ISCSI_CONN_UP)) {
+                        err = transport->set_param(conn, ev->u.set_param.param,
+                                        data, ev->u.set_param.len);
drivers/scsi/sd.c
@@ -3707,7 +3707,12 @@ static int sd_resume_system(struct device *dev)
 static int sd_resume_runtime(struct device *dev)
 {
         struct scsi_disk *sdkp = dev_get_drvdata(dev);
-        struct scsi_device *sdp = sdkp->device;
+        struct scsi_device *sdp;
+
+        if (!sdkp)      /* E.g.: runtime resume at the start of sd_probe() */
+                return 0;
+
+        sdp = sdkp->device;
 
         if (sdp->ignore_media_change) {
                 /* clear the device's sense data */
drivers/scsi/storvsc_drv.c
@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
         foreach_vmbus_pkt(desc, channel) {
                 struct vstor_packet *packet = hv_pkt_data(desc);
                 struct storvsc_cmd_request *request = NULL;
+                u32 pktlen = hv_pkt_datalen(desc);
+                u64 rqst_id = desc->trans_id;
+                u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
+                        stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
 
-                if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
-                                stor_device->vmscsi_size_delta) {
-                        dev_err(&device->device, "Invalid packet len\n");
+                if (pktlen < minlen) {
+                        dev_err(&device->device,
+                                "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+                                rqst_id, pktlen, minlen);
                         continue;
                 }
 
@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
                 if (rqst_id == 0) {
                         /*
                          * storvsc_on_receive() looks at the vstor_packet in the message
-                         * from the ring buffer. If the operation in the vstor_packet is
-                         * COMPLETE_IO, then we call storvsc_on_io_completion(), and
-                         * dereference the guest memory address. Make sure we don't call
-                         * storvsc_on_io_completion() with a guest memory address that is
-                         * zero if Hyper-V were to construct and send such a bogus packet.
+                         * from the ring buffer.
+                         *
+                         * - If the operation in the vstor_packet is COMPLETE_IO, then
+                         *   we call storvsc_on_io_completion(), and dereference the
+                         *   guest memory address. Make sure we don't call
+                         *   storvsc_on_io_completion() with a guest memory address
+                         *   that is zero if Hyper-V were to construct and send such
+                         *   a bogus packet.
+                         *
+                         * - If the operation in the vstor_packet is FCHBA_DATA, then
+                         *   we call cache_wwn(), and access the data payload area of
+                         *   the packet (wwn_packet); however, there is no guarantee
+                         *   that the packet is big enough to contain such area.
+                         *   Future-proof the code by rejecting such a bogus packet.
                          */
-                        if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+                        if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+                            packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
                                 dev_err(&device->device, "Invalid packet with ID of 0\n");
                                 continue;
                         }
drivers/scsi/ufs/ufs-exynos.c
@@ -802,9 +802,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
         }
 
         /* setting for three timeout values for traffic class #0 */
-        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
-        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
-        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+        ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+        ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+        ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
 
         return 0;
 out:
drivers/scsi/ufs/ufshcd-pci.c
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
 
 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-        /*
-         * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
-         * address registers must be restored because the restore kernel can
-         * have used different addresses.
-         */
-        ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
-                      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
-        ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
-                      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
-        ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
-                      REG_UTP_TASK_REQ_LIST_BASE_L);
-        ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
-                      REG_UTP_TASK_REQ_LIST_BASE_H);
-
         if (ufshcd_is_link_hibern8(hba)) {
                 int ret = ufshcd_uic_hibern8_exit(hba);
 
@@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
         .device_reset = ufs_intel_device_reset,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+        struct ufs_hba *hba = dev_get_drvdata(dev);
+
+        /* Force a full reset and restore */
+        ufshcd_set_link_off(hba);
+
+        return ufshcd_system_resume(dev);
+}
+#endif
+
 /**
  * ufshcd_pci_shutdown - main function to put the controller in reset state
  * @pdev: pointer to PCI device handle
@@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-        SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
         SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+        .suspend = ufshcd_system_suspend,
+        .resume = ufshcd_system_resume,
+        .freeze = ufshcd_system_suspend,
+        .thaw = ufshcd_system_resume,
+        .poweroff = ufshcd_system_suspend,
+        .restore = ufshcd_pci_restore,
         .prepare = ufshcd_suspend_prepare,
         .complete = ufshcd_resume_complete,
+#endif
drivers/scsi/ufs/ufshcd.c
@@ -2767,12 +2767,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
         lrbp->req_abort_skip = false;
 
-        err = ufshpb_prep(hba, lrbp);
-        if (err == -EAGAIN) {
-                lrbp->cmd = NULL;
-                ufshcd_release(hba);
-                goto out;
-        }
+        ufshpb_prep(hba, lrbp);
 
         ufshcd_comp_scsi_upiu(hba, lrbp);
 
drivers/scsi/ufs/ufshpb.c
@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
         return transfer_len <= hpb->pre_req_max_tr_len;
 }
 
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
-        return len > hpb->pre_req_min_tr_len &&
-               len <= hpb->pre_req_max_tr_len;
-}
-
 static bool ufshpb_is_general_lun(int lun)
 {
         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -334,7 +324,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 
 static void
 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
-                            __be64 ppn, u8 transfer_len, int read_id)
+                            __be64 ppn, u8 transfer_len)
 {
         unsigned char *cdb = lrbp->cmd->cmnd;
         __be64 ppn_tmp = ppn;
@@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
         /* ppn value is stored as big-endian in the host memory */
         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
         cdb[14] = transfer_len;
-        cdb[15] = read_id;
+        cdb[15] = 0;
 
         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
 }
 
-static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
-                                            unsigned long lpn, unsigned int len,
-                                            int read_id)
-{
-        cdb[0] = UFSHPB_WRITE_BUFFER;
-        cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
-
-        put_unaligned_be32(lpn, &cdb[2]);
-        cdb[6] = read_id;
-        put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
-
-        cdb[9] = 0x00;  /* Control = 0x00 */
-}
-
-static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
-{
-        struct ufshpb_req *pre_req;
-
-        if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
-                dev_info(&hpb->sdev_ufs_lu->sdev_dev,
-                         "pre_req throttle. inflight %d throttle %d",
-                         hpb->num_inflight_pre_req, hpb->throttle_pre_req);
-                return NULL;
-        }
-
-        pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
-                                           struct ufshpb_req, list_req);
-        if (!pre_req) {
-                dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
-                return NULL;
-        }
-
-        list_del_init(&pre_req->list_req);
-        hpb->num_inflight_pre_req++;
-
-        return pre_req;
-}
-
-static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
-                                      struct ufshpb_req *pre_req)
-{
-        pre_req->req = NULL;
-        bio_reset(pre_req->bio);
-        list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
-        hpb->num_inflight_pre_req--;
-}
-
-static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
-{
-        struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
-        struct ufshpb_lu *hpb = pre_req->hpb;
-        unsigned long flags;
-
-        if (error) {
-                struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-                struct scsi_sense_hdr sshdr;
-
-                dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
-                scsi_command_normalize_sense(cmd, &sshdr);
-                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-                        "code %x sense_key %x asc %x ascq %x",
-                        sshdr.response_code,
-                        sshdr.sense_key, sshdr.asc, sshdr.ascq);
-                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-                        "byte4 %x byte5 %x byte6 %x additional_len %x",
-                        sshdr.byte4, sshdr.byte5,
-                        sshdr.byte6, sshdr.additional_length);
-        }
-
-        blk_mq_free_request(req);
-        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-        ufshpb_put_pre_req(pre_req->hpb, pre_req);
-        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
-{
-        struct ufshpb_lu *hpb = pre_req->hpb;
-        struct ufshpb_region *rgn;
-        struct ufshpb_subregion *srgn;
-        __be64 *addr;
-        int offset = 0;
-        int copied;
-        unsigned long lpn = pre_req->wb.lpn;
-        int rgn_idx, srgn_idx, srgn_offset;
-        unsigned long flags;
-
-        addr = page_address(page);
-        ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
-
-        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
-next_offset:
-        rgn = hpb->rgn_tbl + rgn_idx;
-        srgn = rgn->srgn_tbl + srgn_idx;
-
-        if (!ufshpb_is_valid_srgn(rgn, srgn))
-                goto mctx_error;
-
-        if (!srgn->mctx)
-                goto mctx_error;
-
-        copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
-                                           pre_req->wb.len - offset,
-                                           &addr[offset]);
-
-        if (copied < 0)
-                goto mctx_error;
-
-        offset += copied;
-        srgn_offset += copied;
-
-        if (srgn_offset == hpb->entries_per_srgn) {
-                srgn_offset = 0;
-
-                if (++srgn_idx == hpb->srgns_per_rgn) {
-                        srgn_idx = 0;
-                        rgn_idx++;
-                }
-        }
-
-        if (offset < pre_req->wb.len)
-                goto next_offset;
-
-        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-        return 0;
-mctx_error:
-        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-        return -ENOMEM;
-}
-
-static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
-                                       struct request_queue *q,
-                                       struct ufshpb_req *pre_req)
-{
-        struct page *page = pre_req->wb.m_page;
-        struct bio *bio = pre_req->bio;
-        int entries_bytes, ret;
-
-        if (!page)
-                return -ENOMEM;
-
-        if (ufshpb_prep_entry(pre_req, page))
-                return -ENOMEM;
-
-        entries_bytes = pre_req->wb.len * sizeof(__be64);
-
-        ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
-        if (ret != entries_bytes) {
-                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-                        "bio_add_pc_page fail: %d", ret);
-                return -ENOMEM;
-        }
-        return 0;
-}
-
-static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
-{
-        if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
-                hpb->cur_read_id = 1;
-        return hpb->cur_read_id;
-}
-
-static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-                                  struct ufshpb_req *pre_req, int read_id)
-{
-        struct scsi_device *sdev = cmd->device;
-        struct request_queue *q = sdev->request_queue;
-        struct request *req;
-        struct scsi_request *rq;
-        struct bio *bio = pre_req->bio;
-
-        pre_req->hpb = hpb;
-        pre_req->wb.lpn = sectors_to_logical(cmd->device,
-                                             blk_rq_pos(scsi_cmd_to_rq(cmd)));
-        pre_req->wb.len = sectors_to_logical(cmd->device,
-                                             blk_rq_sectors(scsi_cmd_to_rq(cmd)));
-        if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
-                return -ENOMEM;
-
-        req = pre_req->req;
-
-        /* 1. request setup */
-        blk_rq_append_bio(req, bio);
-        req->rq_disk = NULL;
-        req->end_io_data = (void *)pre_req;
-        req->end_io = ufshpb_pre_req_compl_fn;
-
-        /* 2. scsi_request setup */
-        rq = scsi_req(req);
-        rq->retries = 1;
-
-        ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
-                                 read_id);
-        rq->cmd_len = scsi_command_size(rq->cmd);
-
-        if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
-                return -EAGAIN;
-
-        hpb->stats.pre_req_cnt++;
-
-        return 0;
-}
-
-static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-                                int *read_id)
-{
-        struct ufshpb_req *pre_req;
-        struct request *req = NULL;
-        unsigned long flags;
-        int _read_id;
-        int ret = 0;
-
-        req = blk_get_request(cmd->device->request_queue,
-                              REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
-        if (IS_ERR(req))
-                return -EAGAIN;
-
-        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-        pre_req = ufshpb_get_pre_req(hpb);
-        if (!pre_req) {
-                ret = -EAGAIN;
-                goto unlock_out;
-        }
-        _read_id = ufshpb_get_read_id(hpb);
-        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
-        pre_req->req = req;
-
-        ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
-        if (ret)
-                goto free_pre_req;
-
-        *read_id = _read_id;
-
-        return ret;
-free_pre_req:
-        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-        ufshpb_put_pre_req(hpb, pre_req);
-unlock_out:
-        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-        blk_put_request(req);
-        return ret;
-}
-
 /*
  * This function will set up HPB read command using host-side L2P map data.
  */
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
         __be64 ppn;
         unsigned long flags;
         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
-        int read_id = 0;
         int err = 0;
 
         hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
                 return err;
         }
-        if (!ufshpb_is_legacy(hba) &&
-            ufshpb_is_required_wb(hpb, transfer_len)) {
-                err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
-                if (err) {
-                        unsigned long timeout;
-
-                        timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
-                                  hpb->params.requeue_timeout_ms);
-
-                        if (time_before(jiffies, timeout))
-                                return -EAGAIN;
-
-                        hpb->stats.miss_cnt++;
-                        return 0;
-                }
-        }
 
-        ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+        ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
 
         hpb->stats.hit_cnt++;
         return 0;
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
         u32 entries_per_rgn;
         u64 rgn_mem_size, tmp;
 
-        /* for pre_req */
-        hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
         if (ufshpb_is_legacy(hba))
                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
         else
                 hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
 
-        hpb->cur_read_id = 0;
-
         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
drivers/scsi/ufs/ufshpb.h
@@ -241,8 +241,6 @@ struct ufshpb_lu {
         spinlock_t param_lock;
 
         struct list_head lh_pre_req_free;
-        int cur_read_id;
-        int pre_req_min_tr_len;
         int pre_req_max_tr_len;
 
         /* cached L2P map management worker */