mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 04:02:20 +00:00)
Merge patch series "UFS patches for kernel 6.11"
Bart Van Assche <bvanassche@acm.org> says:

Hi Martin,

Please consider this series of UFS driver patches for the next merge window.

Thank you,

Bart.

Link: https://lore.kernel.org/r/20240708211716.2827751-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
commit e30618a480
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -137,7 +137,6 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
  *
  * MAC - Max. Active Command of the Host Controller (HC)
  * HC wouldn't send more than this commands to the device.
- * It is mandatory to implement get_hba_mac() to enable MCQ mode.
  * Calculates and adjusts the queue depth based on the depth
  * supported by the HC and ufs device.
  */
@@ -145,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
 {
 	int mac;
 
-	/* Mandatory to implement get_hba_mac() */
-	mac = ufshcd_mcq_vops_get_hba_mac(hba);
-	if (mac < 0) {
-		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
-		return mac;
+	if (!hba->vops || !hba->vops->get_hba_mac) {
+		/*
+		 * Extract the maximum number of active transfer tasks value
+		 * from the host controller capabilities register. This value is
+		 * 0-based.
+		 */
+		hba->capabilities =
+			ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+		mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
+		mac++;
+	} else {
+		mac = hba->vops->get_hba_mac(hba);
 	}
+	if (mac < 0)
+		goto err;
 
 	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
 	/*
@@ -159,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
 	 * shared queuing architecture is enabled.
 	 */
 	return min_t(int, mac, hba->dev_info.bqueuedepth);
+
+err:
+	dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+	return mac;
 }
 
 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
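The fallback path added above derives MAC from the standard capabilities register instead of requiring a get_hba_mac() vop. A minimal userspace sketch of that decode, using the mask from this series and a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    /* Mask taken from the hunks in this series; the register value below is
     * a hypothetical example, not read from real hardware. */
    #define MASK_TRANSFER_REQUESTS_SLOTS_MCQ 0x000000FF

    int main(void)
    {
            uint32_t capabilities = 0x0000073f; /* pretend REG_CONTROLLER_CAPABILITIES */
            /* The field is 0-based, so a raw 0x3f means 64 active commands. */
            int mac = (int)(capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ) + 1;

            printf("max active commands (MAC) = %d\n", mac); /* prints 64 */
            return 0;
    }

The final queue depth is then min(MAC, bQueueDepth), where bQueueDepth comes from the UFS device descriptor, as the return statement in the hunk above shows.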
@@ -415,9 +427,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
 void ufshcd_mcq_enable(struct ufs_hba *hba)
 {
 	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+	hba->mcq_enabled = true;
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
 
+void ufshcd_mcq_disable(struct ufs_hba *hba)
+{
+	ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
+	hba->mcq_enabled = false;
+}
+
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
 {
 	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 			  struct cq_entry *cqe);
 int ufshcd_mcq_init(struct ufs_hba *hba);
+void ufshcd_mcq_disable(struct ufs_hba *hba);
 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
-void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
-void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
-u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
-void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
-unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
-				       struct ufs_hw_queue *hwq);
 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
 				    struct ufs_hw_queue *hwq);
 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@@ -255,14 +250,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
 	return -EOPNOTSUPP;
 }
 
-static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
-{
-	if (hba->vops && hba->vops->get_hba_mac)
-		return hba->vops->get_hba_mac(hba);
-
-	return -EOPNOTSUPP;
-}
-
 static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
 {
 	if (hba->vops && hba->vops->op_runtime_config)
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -164,8 +164,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
-	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
-	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
 };
 
 static const char *const ufshcd_state_name[] = {
@@ -455,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
 
 	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
 
 		hwq_id = hwq->id;
@@ -2304,7 +2302,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
 	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
 		ufshcd_start_monitor(hba, lrbp);
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		int utrd_size = sizeof(struct utp_transfer_req_desc);
 		struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
 		struct utp_transfer_req_desc *dest;
@@ -2404,7 +2402,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
 	hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
 
 	/* nutrs and nutmrs are 0 based values */
-	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
 	hba->nutmrs =
 	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
 	hba->reserved_slot = hba->nutrs - 1;
@@ -3003,7 +3001,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
-	if (is_mcq_enabled(hba))
+	if (hba->mcq_enabled)
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
 
 	ufshcd_send_command(hba, tag, hwq);
@@ -3062,7 +3060,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
 	unsigned long flags;
 	int err;
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		/*
 		 * MCQ mode. Clean up the MCQ resources similar to
 		 * what the ufshcd_utrl_clear() does for SDB mode.
@@ -3172,7 +3170,7 @@ retry:
 			__func__, lrbp->task_tag);
 
 		/* MCQ mode */
-		if (is_mcq_enabled(hba)) {
+		if (hba->mcq_enabled) {
 			/* successfully cleared the command, retry if needed */
 			if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
 				err = -EAGAIN;
@@ -3994,11 +3992,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  */
 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_LINK_STARTUP,
+	};
 	int ret;
 
-	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret)
 		dev_dbg(hba->dev,
@@ -4016,11 +4014,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  */
 static int ufshcd_dme_reset(struct ufs_hba *hba)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_RESET,
+	};
 	int ret;
 
-	uic_cmd.command = UIC_CMD_DME_RESET;
-
 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret)
 		dev_err(hba->dev,
@@ -4055,11 +4053,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
  */
 static int ufshcd_dme_enable(struct ufs_hba *hba)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_ENABLE,
+	};
 	int ret;
 
-	uic_cmd.command = UIC_CMD_DME_ENABLE;
-
 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret)
 		dev_err(hba->dev,
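The three conversions above follow one pattern: replace "= {0}" plus a later field assignment with a single designated initializer. A standalone sketch of the idiom, with the struct cut down and the opcode value chosen for illustration only:

    #include <stdio.h>

    /* Cut-down stand-in for the kernel's struct uic_command. */
    struct uic_command {
            unsigned int command;
            unsigned int argument1;
            unsigned int argument2;
            unsigned int argument3;
    };

    #define UIC_CMD_DME_ENABLE 0x12 /* illustrative value */

    int main(void)
    {
            /*
             * Named members are set explicitly and every other member is
             * zero-initialized, so the old two-step "= {0}; then assign"
             * collapses into one definition.
             */
            struct uic_command uic_cmd = {
                    .command = UIC_CMD_DME_ENABLE,
            };

            printf("command=%#x argument3=%u\n", uic_cmd.command, uic_cmd.argument3);
            return 0;
    }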
@@ -4112,7 +4110,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 			u8 attr_set, u32 mib_val, u8 peer)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
+		.argument1 = attr_sel,
+		.argument2 = UIC_ARG_ATTR_TYPE(attr_set),
+		.argument3 = mib_val,
+	};
 	static const char *const action[] = {
 		"dme-set",
 		"dme-peer-set"
@@ -4121,12 +4124,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 	int ret;
 	int retries = UFS_UIC_COMMAND_RETRIES;
 
-	uic_cmd.command = peer ?
-		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
-	uic_cmd.argument1 = attr_sel;
-	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
-	uic_cmd.argument3 = mib_val;
-
 	do {
 		/* for peer attributes we retry upon failure */
 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4156,7 +4153,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 			u32 *mib_val, u8 peer)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
+		.argument1 = attr_sel,
+	};
 	static const char *const action[] = {
 		"dme-get",
 		"dme-peer-get"
@@ -4190,10 +4190,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 		}
 	}
 
-	uic_cmd.command = peer ?
-		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
-	uic_cmd.argument1 = attr_sel;
-
 	do {
 		/* for peer attributes we retry upon failure */
 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4326,7 +4322,11 @@ out_unlock:
  */
 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_SET,
+		.argument1 = UIC_ARG_MIB(PA_PWRMODE),
+		.argument3 = mode,
+	};
 	int ret;
 
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@@ -4339,9 +4339,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 		}
 	}
 
-	uic_cmd.command = UIC_CMD_DME_SET;
-	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-	uic_cmd.argument3 = mode;
 	ufshcd_hold(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	ufshcd_release(hba);
@@ -4382,13 +4379,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
 
 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
-	int ret;
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_HIBER_ENTER,
+	};
 	ktime_t start = ktime_get();
+	int ret;
 
 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
 
-	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -4406,13 +4404,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
 
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
-	struct uic_command uic_cmd = {0};
+	struct uic_command uic_cmd = {
+		.command = UIC_CMD_DME_HIBER_EXIT,
+	};
 	int ret;
 	ktime_t start = ktime_get();
 
 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 
-	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
 			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -5562,7 +5561,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
 	u32 tr_doorbell;
 	struct ufs_hw_queue *hwq;
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		hwq = &hba->uhq[queue_num];
 
 		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@@ -6203,7 +6202,7 @@ out:
 /* Complete requests that have door-bell cleared */
 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
 {
-	if (is_mcq_enabled(hba))
+	if (hba->mcq_enabled)
 		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
 	else
 		ufshcd_transfer_req_compl(hba);
@@ -6460,7 +6459,7 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
 		*ret ? "failed" : "succeeded");
 
 	/* Release cmd in MCQ mode if abort succeeds */
-	if (is_mcq_enabled(hba) && (*ret == 0)) {
+	if (hba->mcq_enabled && (*ret == 0)) {
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
 		if (!hwq)
 			return 0;
@@ -7393,7 +7392,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		for (pos = 0; pos < hba->nutrs; pos++) {
 			lrbp = &hba->lrb[pos];
 			if (ufshcd_cmd_inflight(lrbp->cmd) &&
@@ -7489,7 +7488,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
 		 */
 		dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
 			__func__, tag);
-		if (is_mcq_enabled(hba)) {
+		if (hba->mcq_enabled) {
 			/* MCQ mode */
 			if (ufshcd_cmd_inflight(lrbp->cmd)) {
 				/* sleep for max. 200us same delay as in SDB mode */
@@ -7567,7 +7566,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 
 	ufshcd_hold(hba);
 
-	if (!is_mcq_enabled(hba)) {
+	if (!hba->mcq_enabled) {
 		reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 		if (!test_bit(tag, &hba->outstanding_reqs)) {
 			/* If command is already aborted/completed, return FAILED. */
@@ -7600,7 +7599,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	}
 	hba->req_abort_count++;
 
-	if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+	if (!hba->mcq_enabled && !(reg & (1 << tag))) {
 		/* only execute this code in single doorbell mode */
 		dev_err(hba->dev,
 			"%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -7627,7 +7626,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 		goto release;
 	}
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		/* MCQ mode. Branch off to handle abort for mcq mode */
 		err = ufshcd_mcq_abort(cmd);
 		goto release;
@@ -8682,6 +8681,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
 	if (ret)
 		goto err;
 
+	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
 	return 0;
 err:
 	hba->nutrs = old_nutrs;
@@ -8703,12 +8705,6 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
 	ufshcd_mcq_make_queues_operational(hba);
 	ufshcd_mcq_config_mac(hba, hba->nutrs);
 
-	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
-	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
-
-	ufshcd_mcq_enable(hba);
-	hba->mcq_enabled = true;
-
 	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
 		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
 		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
@@ -8736,8 +8732,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 	ufshcd_set_link_active(hba);
 
 	/* Reconfigure MCQ upon reset */
-	if (is_mcq_enabled(hba) && !init_dev_params)
+	if (hba->mcq_enabled && !init_dev_params) {
 		ufshcd_config_mcq(hba);
+		ufshcd_mcq_enable(hba);
+	}
 
 	/* Verify device initialization by sending NOP OUT UPIU */
 	ret = ufshcd_verify_dev_init(hba);
@@ -8758,11 +8756,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 	if (ret)
 		return ret;
 	if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+		ufshcd_mcq_enable(hba);
 		ret = ufshcd_alloc_mcq(hba);
 		if (!ret) {
 			ufshcd_config_mcq(hba);
 		} else {
 			/* Continue with SDB mode */
+			ufshcd_mcq_disable(hba);
 			use_mcq_mode = false;
 			dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
 				ret);
@@ -8776,6 +8776,7 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 	} else if (is_mcq_supported(hba)) {
 		/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
 		ufshcd_config_mcq(hba);
+		ufshcd_mcq_enable(hba);
 	}
 }
 
@@ -8961,8 +8962,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
 	.eh_timed_out		= ufshcd_eh_timed_out,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
-	.can_queue		= UFSHCD_CAN_QUEUE,
 	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
 	.max_sectors		= SZ_1M / SECTOR_SIZE,
 	.max_host_blocked	= 1,
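The ufshcd_device_init() hunks above change the bring-up order: MCQ is now switched on before the queues are allocated, apparently so that the re-read of the capabilities register in ufshcd_mcq_decide_queue_depth() reflects MCQ operation, and ufshcd_config_mcq() no longer implies enabling. A stubbed sketch of the resulting control flow; every function here is a stand-in, not a kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in stubs; names echo the diff but none of this is kernel code. */
    static bool mcq_enabled;

    static void mcq_enable(void)  { mcq_enabled = true;  printf("MCQ mode on\n"); }
    static void mcq_disable(void) { mcq_enabled = false; printf("MCQ mode off\n"); }
    static int  alloc_mcq(void)   { return 0; /* pretend allocation succeeded */ }
    static void config_mcq(void)  { printf("queues configured\n"); }

    int main(void)
    {
            /* New order from the hunks above: enable, allocate, configure;
             * fall back to single-doorbell (SDB) mode if allocation fails. */
            mcq_enable();
            if (alloc_mcq() == 0)
                    config_mcq();
            else
                    mcq_disable(); /* continue with SDB mode */
            return 0;
    }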
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -693,7 +693,7 @@ static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 	u32 irq, i;
 
-	if (!is_mcq_enabled(hba))
+	if (!hba->mcq_enabled)
 		return;
 
 	if (host->mcq_nr_intr == 0)
@@ -711,7 +711,7 @@ static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 	u32 irq, i;
 
-	if (!is_mcq_enabled(hba))
+	if (!hba->mcq_enabled)
 		return;
 
 	if (host->mcq_nr_intr == 0)
@@ -1308,7 +1308,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
 	if (err)
 		return err;
 
-	if (is_mcq_enabled(hba)) {
+	if (hba->mcq_enabled) {
 		ufs_mtk_config_mcq(hba, false);
 		ufshcd_mcq_make_queues_operational(hba);
 		ufshcd_mcq_config_mac(hba, hba->nutrs);
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -73,8 +73,8 @@ enum ufs_event_type {
  * @done: UIC command completion
  */
 struct uic_command {
-	u32 command;
-	u32 argument1;
+	const u32 command;
+	const u32 argument1;
 	u32 argument2;
 	u32 argument3;
 	int cmd_active;
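Making command and argument1 const means every struct uic_command must receive those values at the point of definition, which is what forces the initializer conversions throughout ufshcd.c above. A compilable sketch of what the const members enforce, with the struct simplified and the opcode values illustrative:

    struct uic_command {
            const unsigned int command;   /* settable only via an initializer */
            unsigned int argument3;
    };

    int main(void)
    {
            struct uic_command uic_cmd = { .command = 0x16 }; /* fine */

            /* uic_cmd.command = 0x17; */ /* would not compile: read-only member */
            uic_cmd.argument3 = 1;        /* non-const members stay writable */
            return (int)uic_cmd.argument3;
    }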
@@ -325,7 +325,9 @@ struct ufs_pwr_mode_info {
  * @event_notify: called to notify important events
  * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
  * @mcq_config_resource: called to configure MCQ platform resources
- * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
+ * @get_hba_mac: reports maximum number of outstanding commands supported by
+ *	the controller. Should be implemented for UFSHCI 4.0 or later
+ *	controllers that are not compliant with the UFSHCI 4.0 specification.
  * @op_runtime_config: called to config Operation and runtime regs Pointers
  * @get_outstanding_cqs: called to get outstanding completion queues
  * @config_esi: called to config Event Specific Interrupt
@@ -1133,11 +1135,6 @@ struct ufs_hw_queue {
 
 #define MCQ_QCFG_SIZE	0x40
 
-static inline bool is_mcq_enabled(struct ufs_hba *hba)
-{
-	return hba->mcq_enabled;
-}
-
 static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
 		enum ufshcd_mcq_opr opr, int idx)
 {
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -67,7 +67,8 @@ enum {
 
 /* Controller capability masks */
 enum {
-	MASK_TRANSFER_REQUESTS_SLOTS		= 0x0000001F,
+	MASK_TRANSFER_REQUESTS_SLOTS_SDB	= 0x0000001F,
+	MASK_TRANSFER_REQUESTS_SLOTS_MCQ	= 0x000000FF,
 	MASK_NUMBER_OUTSTANDING_RTT		= 0x0000FF00,
 	MASK_TASK_MANAGEMENT_REQUEST_SLOTS	= 0x00070000,
 	MASK_EHSLUTRD_SUPPORTED			= 0x00400000,
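The split masks reflect that the transfer-request slot field is 5 bits wide in single-doorbell (SDB) mode but 8 bits wide for MCQ. A small decode sketch with a hypothetical capabilities value:

    #include <stdint.h>
    #include <stdio.h>

    /* Masks as introduced in the hunk above; the register value is made up. */
    #define MASK_TRANSFER_REQUESTS_SLOTS_SDB 0x0000001F
    #define MASK_TRANSFER_REQUESTS_SLOTS_MCQ 0x000000FF

    int main(void)
    {
            uint32_t caps = 0x000000ff; /* hypothetical capabilities register */

            /* Both fields are 0-based, hence the +1 (see ufshcd_hba_capabilities()). */
            printf("SDB slots: %u\n", (caps & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1); /* 32 */
            printf("MCQ slots: %u\n", (caps & MASK_TRANSFER_REQUESTS_SLOTS_MCQ) + 1); /* 256 */
            return 0;
    }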