mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 12:11:40 +00:00
scsi: ufs: core: Prepare ufshcd_send_command() for MCQ
Add support to send commands using multiple submission queues in MCQ mode. Modify the functions that use ufshcd_send_command().

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
parent
0d33728fc0
commit
22a2d563de
@@ -309,6 +309,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
|
||||
for (i = 0; i < hba->nr_hw_queues; i++) {
|
||||
hwq = &hba->uhq[i];
|
||||
hwq->max_entries = hba->nutrs;
|
||||
spin_lock_init(&hwq->sq_lock);
|
||||
}
|
||||
|
||||
/* The very first HW queue serves device commands */
|
||||
|
@@ -335,4 +335,14 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
|
||||
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
|
||||
}
|
||||
|
||||
static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
|
||||
{
|
||||
u32 mask = q->max_entries - 1;
|
||||
u32 val;
|
||||
|
||||
q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
|
||||
val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
|
||||
writel(val, q->mcq_sq_tail);
|
||||
}
|
||||
|
||||
#endif /* _UFSHCD_PRIV_H_ */
|
||||
|
@@ -2185,9 +2185,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
|
||||
* ufshcd_send_command - Send SCSI or device management commands
|
||||
* @hba: per adapter instance
|
||||
* @task_tag: Task tag of the command
|
||||
* @hwq: pointer to hardware queue instance
|
||||
*/
|
||||
static inline
|
||||
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
|
||||
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
|
||||
struct ufs_hw_queue *hwq)
|
||||
{
|
||||
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
|
||||
unsigned long flags;
|
||||
@@ -2201,13 +2203,25 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
|
||||
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
|
||||
ufshcd_start_monitor(hba, lrbp);
|
||||
|
||||
if (is_mcq_enabled(hba)) {
|
||||
int utrd_size = sizeof(struct utp_transfer_req_desc);
|
||||
|
||||
spin_lock(&hwq->sq_lock);
|
||||
memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
|
||||
lrbp->utr_descriptor_ptr, utrd_size);
|
||||
ufshcd_inc_sq_tail(hwq);
|
||||
spin_unlock(&hwq->sq_lock);
|
||||
} else {
|
||||
spin_lock_irqsave(&hba->outstanding_lock, flags);
|
||||
if (hba->vops && hba->vops->setup_xfer_req)
|
||||
hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
|
||||
__set_bit(task_tag, &hba->outstanding_reqs);
|
||||
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
|
||||
hba->vops->setup_xfer_req(hba, lrbp->task_tag,
|
||||
!!lrbp->cmd);
|
||||
__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
|
||||
ufshcd_writel(hba, 1 << lrbp->task_tag,
|
||||
REG_UTP_TRANSFER_REQ_DOOR_BELL);
|
||||
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ufshcd_copy_sense_data - Copy sense data in case of check condition
|
||||
@@ -2836,6 +2850,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
int tag = scsi_cmd_to_rq(cmd)->tag;
|
||||
struct ufshcd_lrb *lrbp;
|
||||
int err = 0;
|
||||
struct ufs_hw_queue *hwq = NULL;
|
||||
|
||||
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
|
||||
|
||||
@@ -2920,7 +2935,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ufshcd_send_command(hba, tag);
|
||||
ufshcd_send_command(hba, tag, hwq);
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
@@ -3121,10 +3136,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
|
||||
goto out;
|
||||
|
||||
hba->dev_cmd.complete = &wait;
|
||||
hba->dev_cmd.cqe = NULL;
|
||||
|
||||
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
|
||||
|
||||
ufshcd_send_command(hba, tag);
|
||||
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
|
||||
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
|
||||
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
|
||||
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
|
||||
@@ -6938,7 +6954,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
|
||||
|
||||
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
|
||||
|
||||
ufshcd_send_command(hba, tag);
|
||||
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
|
||||
/*
|
||||
* ignore the returning value here - ufshcd_check_query_response is
|
||||
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
|
||||
@@ -7104,7 +7120,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
|
||||
|
||||
hba->dev_cmd.complete = &wait;
|
||||
|
||||
ufshcd_send_command(hba, tag);
|
||||
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
|
||||
|
||||
err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
|
||||
|
||||
|
@@ -224,6 +224,7 @@ struct ufs_dev_cmd {
|
||||
struct mutex lock;
|
||||
struct completion *complete;
|
||||
struct ufs_query query;
|
||||
struct cq_entry *cqe;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1078,6 +1079,8 @@ struct ufs_hba {
|
||||
* @cqe_dma_addr: completion queue dma address
|
||||
* @max_entries: max number of slots in this hardware queue
|
||||
* @id: hardware queue ID
|
||||
* @sq_tail_slot: current slot to which SQ tail pointer is pointing
|
||||
* @sq_lock: serialize submission queue access
|
||||
*/
|
||||
struct ufs_hw_queue {
|
||||
void __iomem *mcq_sq_head;
|
||||
@@ -1091,6 +1094,8 @@ struct ufs_hw_queue {
|
||||
dma_addr_t cqe_dma_addr;
|
||||
u32 max_entries;
|
||||
u32 id;
|
||||
u32 sq_tail_slot;
|
||||
spinlock_t sq_lock;
|
||||
};
|
||||
|
||||
static inline bool is_mcq_enabled(struct ufs_hba *hba)
|
||||
|
Loading…
Reference in New Issue
Block a user