Merge patch series "scsi: ufs: qcom: Code cleanups"
Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> says:

This series has some code cleanups to the Qcom UFS driver. No functional change. In this version, I've removed the code supporting legacy controllers (ver < 2.0), as the respective platforms were never supported upstream.

Tested on: RB5 development board based on the Qcom SM8250 SoC.

Link: https://lore.kernel.org/r/20231208065902.11006-1-manivannan.sadhasivam@linaro.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit ae6fd54e60
drivers/ufs/core/ufshcd.c:

@@ -289,21 +289,23 @@ static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 
-static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+void ufshcd_enable_irq(struct ufs_hba *hba)
 {
 	if (!hba->is_irq_enabled) {
 		enable_irq(hba->irq);
 		hba->is_irq_enabled = true;
 	}
 }
+EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
 
-static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+void ufshcd_disable_irq(struct ufs_hba *hba)
 {
 	if (hba->is_irq_enabled) {
 		disable_irq(hba->irq);
 		hba->is_irq_enabled = false;
 	}
 }
+EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
 
 static void ufshcd_configure_wb(struct ufs_hba *hba)
 {
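The two helpers above were previously static inline and private to the UFS core; un-inlining and exporting them lets vendor drivers toggle the controller interrupt without open-coding the enable_irq()/is_irq_enabled bookkeeping. As a sketch (not part of the patch), the resulting call pattern in a vendor reset path mirrors the ufs_qcom_host_reset() hunk later in this diff:

	bool reenable_intr = hba->is_irq_enabled;

	ufshcd_disable_irq(hba);	/* no-op if the IRQ is already disabled */

	/* ... assert and deassert the core reset here ... */

	if (reenable_intr)
		ufshcd_enable_irq(hba);	/* restores the is_irq_enabled tracking */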
drivers/ufs/host/ufs-qcom.c:

@@ -4,26 +4,26 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/time.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interconnect.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
 #include <linux/reset-controller.h>
-#include <linux/devfreq.h>
+#include <linux/time.h>
 
 #include <soc/qcom/ice.h>
 
 #include <ufs/ufshcd.h>
-#include "ufshcd-pltfrm.h"
-#include <ufs/unipro.h>
-#include "ufs-qcom.h"
 #include <ufs/ufshci.h>
 #include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-qcom.h"
 
 #define MCQ_QCFGPTR_MASK	GENMASK(7, 0)
 #define MCQ_QCFGPTR_UNIT	0x200

@@ -90,8 +90,6 @@ static const struct __ufs_qcom_bw_table {
 	[MODE_MAX][0][0] = { 7643136, 307200 },
 };
 
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
 
@@ -158,7 +156,7 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
 	cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
 	if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
 	    cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	if (config_enable)
 		return qcom_ice_program_key(host->ice,
@@ -194,52 +192,12 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
 }
 #endif
 
-static int ufs_qcom_host_clk_get(struct device *dev,
-		const char *name, struct clk **clk_out, bool optional)
-{
-	struct clk *clk;
-	int err = 0;
-
-	clk = devm_clk_get(dev, name);
-	if (!IS_ERR(clk)) {
-		*clk_out = clk;
-		return 0;
-	}
-
-	err = PTR_ERR(clk);
-
-	if (optional && err == -ENOENT) {
-		*clk_out = NULL;
-		return 0;
-	}
-
-	if (err != -EPROBE_DEFER)
-		dev_err(dev, "failed to get %s err %d\n", name, err);
-
-	return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
-		const char *name, struct clk *clk)
-{
-	int err = 0;
-
-	err = clk_prepare_enable(clk);
-	if (err)
-		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
-	return err;
-}
-
 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 {
 	if (!host->is_lane_clks_enabled)
 		return;
 
-	clk_disable_unprepare(host->tx_l1_sync_clk);
-	clk_disable_unprepare(host->tx_l0_sync_clk);
-	clk_disable_unprepare(host->rx_l1_sync_clk);
-	clk_disable_unprepare(host->rx_l0_sync_clk);
+	clk_bulk_disable_unprepare(host->num_clks, host->clks);
 
 	host->is_lane_clks_enabled = false;
 }

@@ -247,73 +205,29 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 {
 	int err;
-	struct device *dev = host->hba->dev;
 
 	if (host->is_lane_clks_enabled)
 		return 0;
 
-	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
-		host->rx_l0_sync_clk);
+	err = clk_bulk_prepare_enable(host->num_clks, host->clks);
 	if (err)
 		return err;
 
-	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
-		host->tx_l0_sync_clk);
-	if (err)
-		goto disable_rx_l0;
-
-	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
-		host->rx_l1_sync_clk);
-	if (err)
-		goto disable_tx_l0;
-
-	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-		host->tx_l1_sync_clk);
-	if (err)
-		goto disable_rx_l1;
-
 	host->is_lane_clks_enabled = true;
 
 	return 0;
-
-disable_rx_l1:
-	clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
-	clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
-	clk_disable_unprepare(host->rx_l0_sync_clk);
-
-	return err;
 }
 
 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 {
-	int err = 0;
+	int err;
 	struct device *dev = host->hba->dev;
 
 	if (has_acpi_companion(dev))
 		return 0;
 
-	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
-					&host->rx_l0_sync_clk, false);
-	if (err)
+	err = devm_clk_bulk_get_all(dev, &host->clks);
+	if (err <= 0)
 		return err;
 
-	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
-					&host->tx_l0_sync_clk, false);
-	if (err)
-		return err;
-
-	/* In case of single lane per direction, don't read lane1 clocks */
-	if (host->hba->lanes_per_direction > 1) {
-		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-						&host->rx_l1_sync_clk, false);
-		if (err)
-			return err;
-
-		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-						&host->tx_l1_sync_clk, true);
-	}
+	host->num_clks = err;
 
 	return 0;
 }
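The two hunks above replace four hand-managed per-lane clocks with the clk_bulk API: devm_clk_bulk_get_all() fetches every clock listed in the device's DT node and returns the count (or a negative errno), after which the prepare/unprepare helpers operate on the whole array at once. A minimal sketch of the same pattern outside this driver (struct and function names are illustrative, not from the patch):

	#include <linux/clk.h>
	#include <linux/device.h>

	struct my_host {
		struct clk_bulk_data *clks;	/* array allocated by the clk core */
		int num_clks;
	};

	static int my_host_clk_init(struct device *dev, struct my_host *host)
	{
		int ret;

		/* Grab all "clocks" entries from the DT node; returns the count. */
		ret = devm_clk_bulk_get_all(dev, &host->clks);
		if (ret <= 0)
			return ret;	/* no clocks found, or -errno */
		host->num_clks = ret;

		/* Prepare and enable the whole array; unwinds itself on failure. */
		return clk_bulk_prepare_enable(host->num_clks, host->clks);
	}

	static void my_host_clk_exit(struct my_host *host)
	{
		clk_bulk_disable_unprepare(host->num_clks, host->clks);
	}

Note the quirk the driver inherits here: a return value of 0 from devm_clk_bulk_get_all() means no clocks were found, and the caller must decide how to treat that.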
@@ -321,7 +235,7 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 {
 	int err;
-	u32 tx_fsm_val = 0;
+	u32 tx_fsm_val;
 	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
 
 	do {

@@ -360,9 +274,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 
 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 {
-	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
-		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
-		    REG_UFS_CFG1);
+	ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
 
 	if (host->hw_ver.major >= 0x05)
 		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);

@@ -376,18 +288,15 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
  */
 static int ufs_qcom_host_reset(struct ufs_hba *hba)
 {
-	int ret = 0;
+	int ret;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	bool reenable_intr = false;
+	bool reenable_intr;
 
-	if (!host->core_reset) {
-		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+	if (!host->core_reset)
 		return 0;
-	}
 
 	reenable_intr = hba->is_irq_enabled;
-	disable_irq(hba->irq);
-	hba->is_irq_enabled = false;
+	ufshcd_disable_irq(hba);
 
 	ret = reset_control_assert(host->core_reset);
 	if (ret) {

@@ -404,16 +313,16 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
 	usleep_range(200, 210);
 
 	ret = reset_control_deassert(host->core_reset);
-	if (ret)
+	if (ret) {
 		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
 				__func__, ret);
+		return ret;
+	}
 
 	usleep_range(1000, 1100);
 
-	if (reenable_intr) {
-		enable_irq(hba->irq);
-		hba->is_irq_enabled = true;
-	}
+	if (reenable_intr)
+		ufshcd_enable_irq(hba);
 
 	return 0;
 }

@@ -422,18 +331,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x1) {
-		/*
-		 * HS-G3 operations may not reliably work on legacy QCOM
-		 * UFS host controller hardware even though capability
-		 * exchange during link startup phase may end up
-		 * negotiating maximum supported gear as G3.
-		 * Hence downgrade the maximum supported gear to HS-G2.
-		 */
-		return UFS_HS_G2;
-	} else if (host->hw_ver.major >= 0x4) {
+	if (host->hw_ver.major >= 0x4)
 		return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
-	}
 
 	/* Default is HS-G3 */
 	return UFS_HS_G3;

@@ -464,8 +363,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 	/* Reset UFS Host Controller and PHY */
 	ret = ufs_qcom_host_reset(hba);
 	if (ret)
-		dev_warn(hba->dev, "%s: host reset returned %d\n",
-				  __func__, ret);
+		return ret;
 
 	/* phy initialization - calibrate the phy */
 	ret = phy_init(phy);

@@ -507,9 +405,8 @@ out_disable_phy:
  */
 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 {
-	ufshcd_writel(hba,
-		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
-		REG_UFS_CFG2);
+	ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
+		    REG_UFS_CFG2);
 
 	/* Ensure that HW clock gating is enabled before next operations */
 	mb();
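Several hunks in this series replace open-coded ufshcd_readl()/ufshcd_writel() sequences with ufshcd_rmwl(), the core's read-modify-write helper. For reference, its definition in include/ufs/ufshcd.h (whose closing lines are visible in the last hunk of this page) clears the mask bits and ORs in the new value, clipped to the mask:

	static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
	{
		u32 tmp;

		tmp = ufshcd_readl(hba, reg);
		tmp &= ~mask;		/* clear the field */
		tmp |= (val & mask);	/* set the new value within the field */
		ufshcd_writel(hba, tmp, reg);
	}

So ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2) is equivalent to the removed read-OR-write sequence, just shorter and harder to get wrong.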
@@ -519,11 +416,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 			      enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	int err = 0;
+	int err;
 
 	switch (status) {
 	case PRE_CHANGE:
-		ufs_qcom_power_up_sequence(hba);
+		err = ufs_qcom_power_up_sequence(hba);
+		if (err)
+			return err;
+
 		/*
 		 * The PHY PLL output is the source of tx/rx lane symbol
 		 * clocks, hence, enable the lane clocks only after PHY

@@ -562,41 +462,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
-	u32 core_clk_period_in_ns;
-	u32 tx_clk_cycles_per_us = 0;
 	unsigned long core_clk_rate = 0;
-	u32 core_clk_cycles_per_us = 0;
-
-	static u32 pwm_fr_table[][2] = {
-		{UFS_PWM_G1, 0x1},
-		{UFS_PWM_G2, 0x1},
-		{UFS_PWM_G3, 0x1},
-		{UFS_PWM_G4, 0x1},
-	};
-
-	static u32 hs_fr_table_rA[][2] = {
-		{UFS_HS_G1, 0x1F},
-		{UFS_HS_G2, 0x3e},
-		{UFS_HS_G3, 0x7D},
-	};
-
-	static u32 hs_fr_table_rB[][2] = {
-		{UFS_HS_G1, 0x24},
-		{UFS_HS_G2, 0x49},
-		{UFS_HS_G3, 0x92},
-	};
+	u32 core_clk_cycles_per_us;
 
 	/*
-	 * The Qunipro controller does not use following registers:
-	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-	 * UFS_REG_PA_LINK_STARTUP_TIMER.
-	 * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+	 * UTP controller uses SYS1CLK_1US_REG register for Interrupt
 	 * Aggregation logic.
+	 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
+	 * controller V4.0.0 onwards.
 	 */
-	if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
-	    !ufshcd_is_intr_aggr_allowed(hba))
+	if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
 		return 0;
 
 	if (gear == 0) {

@@ -629,79 +504,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 		mb();
 	}
 
-	if (ufs_qcom_cap_qunipro(host))
-		return 0;
-
-	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
-	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
-	core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
-	switch (hs) {
-	case FASTAUTO_MODE:
-	case FAST_MODE:
-		if (rate == PA_HS_MODE_A) {
-			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
-				dev_err(hba->dev,
-					"%s: index %d exceeds table size %zu\n",
-					__func__, gear,
-					ARRAY_SIZE(hs_fr_table_rA));
-				return -EINVAL;
-			}
-			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
-		} else if (rate == PA_HS_MODE_B) {
-			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
-				dev_err(hba->dev,
-					"%s: index %d exceeds table size %zu\n",
-					__func__, gear,
-					ARRAY_SIZE(hs_fr_table_rB));
-				return -EINVAL;
-			}
-			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
-		} else {
-			dev_err(hba->dev, "%s: invalid rate = %d\n",
-				__func__, rate);
-			return -EINVAL;
-		}
-		break;
-	case SLOWAUTO_MODE:
-	case SLOW_MODE:
-		if (gear > ARRAY_SIZE(pwm_fr_table)) {
-			dev_err(hba->dev,
-				"%s: index %d exceeds table size %zu\n",
-				__func__, gear,
-				ARRAY_SIZE(pwm_fr_table));
-			return -EINVAL;
-		}
-		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
-		break;
-	case UNCHANGED:
-	default:
-		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
-		return -EINVAL;
-	}
-
-	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
-	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
-		/* this register 2 fields shall be written at once */
-		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
-			      REG_UFS_TX_SYMBOL_CLK_NS_US);
-		/*
-		 * make sure above write gets applied before we return from
-		 * this function.
-		 */
-		mb();
-	}
-
-	if (update_link_startup_timer && host->hw_ver.major != 0x5) {
-		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
-			      REG_UFS_CFG0);
-		/*
-		 * make sure that this configuration is applied before
-		 * we return
-		 */
-		mb();
-	}
-
 	return 0;
 }
 
@@ -709,7 +511,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 			      enum ufs_notify_change_status status)
 {
 	int err = 0;
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
 	switch (status) {
 	case PRE_CHANGE:

@@ -720,11 +521,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 			return -EINVAL;
 		}
 
-		if (ufs_qcom_cap_qunipro(host)) {
-			err = ufs_qcom_set_core_clk_ctrl(hba, true);
-			if (err)
-				dev_err(hba->dev, "cfg core clk ctrl failed\n");
-		}
+		err = ufs_qcom_set_core_clk_ctrl(hba, true);
+		if (err)
+			dev_err(hba->dev, "cfg core clk ctrl failed\n");
 		/*
 		 * Some UFS devices (and may be host) have issues if LCC is
 		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0

@@ -1016,12 +815,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (host->hw_ver.major == 0x1)
-		return ufshci_version(1, 1);
-	else
-		return ufshci_version(2, 0);
+	return ufshci_version(2, 0);
 }
 
 /**

@@ -1037,27 +831,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x01) {
-		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
-
-		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
-			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
-
-		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
-	}
-
-	if (host->hw_ver.major == 0x2) {
+	if (host->hw_ver.major == 0x2)
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
-		if (!ufs_qcom_cap_qunipro(host))
-			/* Legacy UniPro mode still need following quirks */
-			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
-				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
-	}
-
 	if (host->hw_ver.major > 0x3)
 		hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
 }
@@ -1112,19 +888,12 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba)
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
 	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
 	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 	hba->caps |= UFSHCD_CAP_WB_EN;
 	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
-	if (host->hw_ver.major >= 0x2) {
-		host->caps = UFS_QCOM_CAP_QUNIPRO |
-			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
-	}
 }
 
 /**

@@ -1247,16 +1016,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 {
 	int err;
 	struct device *dev = hba->dev;
-	struct platform_device *pdev = to_platform_device(dev);
 	struct ufs_qcom_host *host;
-	struct resource *res;
 	struct ufs_clk_info *clki;
 
 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-	if (!host) {
-		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+	if (!host)
 		return -ENOMEM;
-	}
 
 	/* Make a two way bind between the qcom host and the hba */
 	host->hba = hba;

@@ -1294,34 +1059,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	host->device_reset = devm_gpiod_get_optional(dev, "reset",
 						     GPIOD_OUT_HIGH);
 	if (IS_ERR(host->device_reset)) {
-		err = PTR_ERR(host->device_reset);
-		if (err != -EPROBE_DEFER)
-			dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+		err = dev_err_probe(dev, PTR_ERR(host->device_reset),
+				    "Failed to acquire device reset gpio\n");
 		goto out_variant_clear;
 	}
 
 	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
 		&host->hw_ver.minor, &host->hw_ver.step);
 
-	/*
-	 * for newer controllers, device reference clock control bit has
-	 * moved inside UFS controller register address space itself.
-	 */
-	if (host->hw_ver.major >= 0x02) {
-		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
-		host->dev_ref_clk_en_mask = BIT(26);
-	} else {
-		/* "dev_ref_clk_ctrl_mem" is optional resource */
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   "dev_ref_clk_ctrl_mem");
-		if (res) {
-			host->dev_ref_clk_ctrl_mmio =
-					devm_ioremap_resource(dev, res);
-			if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
-				host->dev_ref_clk_ctrl_mmio = NULL;
-			host->dev_ref_clk_en_mask = BIT(5);
-		}
-	}
+	host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+	host->dev_ref_clk_en_mask = BIT(26);
 
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
 		if (!strcmp(clki->name, "core_clk_unipro"))

@@ -1343,9 +1090,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
 	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
 
-	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
-		ufs_qcom_hosts[hba->dev->id] = host;
-
 	ufs_qcom_get_default_testbus_cfg(host);
 	err = ufs_qcom_testbus_config(host);
 	if (err)
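The reset-GPIO hunk above also converts the PTR_ERR()/dev_err() pair to dev_err_probe(), which always returns the error it is given, logs it unless it is -EPROBE_DEFER, and in the deferral case records the reason for debugfs instead. A sketch of the idiom outside this driver (the function name is illustrative):

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	static int my_probe_get_reset(struct device *dev, struct gpio_desc **out)
	{
		struct gpio_desc *reset;

		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		if (IS_ERR(reset))
			/* Quiet on -EPROBE_DEFER, logged otherwise; returns the errno. */
			return dev_err_probe(dev, PTR_ERR(reset),
					     "Failed to acquire device reset gpio\n");

		*out = reset;
		return 0;
	}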
@@ -1446,7 +1190,7 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct list_head *head = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
-	u32 cycles_in_1us;
+	u32 cycles_in_1us = 0;
 	u32 core_clk_ctrl_reg;
 	int err;
 

@@ -1499,9 +1243,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
 	int ret;
 
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
 				  attr->hs_rate, false, true);
 	if (ret) {

@@ -1519,13 +1260,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
 	u32 core_clk_ctrl_reg;
 
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	err = ufshcd_dme_get(hba,
 			UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 			&core_clk_ctrl_reg);

@@ -1544,11 +1281,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	/* set unipro core clock attributes and clear clock divider */
 	return ufs_qcom_set_core_clk_ctrl(hba, false);
 }

@@ -1557,7 +1289,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
 		bool scale_up, enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	int err = 0;
+	int err;
 
 	/* check the host controller state before sending hibern8 cmd */
 	if (!ufshcd_is_hba_active(hba))

@@ -1828,7 +1560,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
 	struct platform_device *pdev = to_platform_device(hba->dev);
 	struct ufshcd_res_info *res;
 	struct resource *res_mem, *res_mcq;
-	int i, ret = 0;
+	int i, ret;
 
 	memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
 

@@ -1840,7 +1572,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
 		if (!res->resource) {
 			dev_info(hba->dev, "Resource %s not provided\n", res->name);
 			if (i == RES_UFS)
-				return -ENOMEM;
+				return -ENODEV;
 			continue;
 		} else if (i == RES_UFS) {
 			res_mem = res->resource;

@@ -2011,11 +1743,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 		platform_msi_domain_free_irqs(hba->dev);
 	} else {
 		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-		    host->hw_ver.step == 0) {
-			ufshcd_writel(hba,
-				      ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
-				      REG_UFS_CFG3);
-		}
+		    host->hw_ver.step == 0)
+			ufshcd_rmwl(hba, ESI_VEC_MASK, 0x1f00, REG_UFS_CFG3);
 		ufshcd_mcq_enable_esi(hba);
 	}
 
drivers/ufs/host/ufs-qcom.h:

@@ -10,24 +10,18 @@
 #include <soc/qcom/ice.h>
 #include <ufs/ufshcd.h>
 
-#define MAX_UFS_QCOM_HOSTS	1
-#define MAX_U32			(~(u32)0)
 #define MPHY_TX_FSM_STATE	0x41
 #define TX_FSM_HIBERN8		0x1
 #define HBRN8_POLL_TOUT_MS	100
 #define DEFAULT_CLK_RATE_HZ	1000000
-#define BUS_VECTOR_NAME_LEN	32
 #define MAX_SUPP_MAC		64
 
 #define UFS_HW_VER_MAJOR_MASK	GENMASK(31, 28)
 #define UFS_HW_VER_MINOR_MASK	GENMASK(27, 16)
 #define UFS_HW_VER_STEP_MASK	GENMASK(15, 0)
 
+#define UFS_DEV_VER_MAJOR_MASK	GENMASK(7, 4)
+
 /* vendor specific pre-defined parameters */
 #define SLOW	1
 #define FAST	2
 #define UFS_QCOM_LIMIT_HS_RATE	PA_HS_MODE_B
 
 /* QCOM UFS host controller vendor specific registers */
 enum {

@@ -95,9 +89,6 @@ enum {
 #define TEST_BUS_SEL		GENMASK(22, 19)
 #define UFS_REG_TEST_BUS_EN	BIT(30)
 
-#define UFS_PHY_RESET_ENABLE	1
-#define UFS_PHY_RESET_DISABLE	0
-
 /* bit definitions for REG_UFS_CFG2 register */
 #define UAWM_HW_CGC_EN		BIT(0)
 #define UARM_HW_CGC_EN		BIT(1)

@@ -108,6 +99,9 @@ enum {
 #define TMRLUT_HW_CGC_EN	BIT(6)
 #define OCSC_HW_CGC_EN		BIT(7)
 
+/* bit definitions for REG_UFS_CFG3 register */
+#define ESI_VEC_MASK		GENMASK(22, 12)
+
 /* bit definitions for REG_UFS_PARAM0 */
 #define MAX_HS_GEAR_MASK	GENMASK(6, 4)
 #define UFS_QCOM_MAX_GEAR(x)	FIELD_GET(MAX_HS_GEAR_MASK, (x))
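UFS_QCOM_MAX_GEAR() above leans on the <linux/bitfield.h> helpers: GENMASK(6, 4) builds a three-bit mask and FIELD_GET() extracts and right-shifts the field. A quick illustration with a made-up register value (0x58 is hypothetical, not a documented REG_UFS_PARAM0 reading):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define MAX_HS_GEAR_MASK	GENMASK(6, 4)	/* bits [6:4] of REG_UFS_PARAM0 */

	/*
	 * 0x58 is 0b01011000, so bits [6:4] hold 0b101:
	 * FIELD_GET(MAX_HS_GEAR_MASK, 0x58) == (0x58 & 0x70) >> 4 == 5
	 */
	u32 max_gear = FIELD_GET(MAX_HS_GEAR_MASK, 0x58);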
@@ -120,13 +114,6 @@ enum {
 					 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
 					 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
-/* bit offset */
-#define OFFSET_CLK_NS_REG		0xa
-
-/* bit masks */
-#define MASK_TX_SYMBOL_CLK_1US_REG	GENMASK(9, 0)
-#define MASK_CLK_NS_REG			GENMASK(23, 10)
-
 /* QUniPro Vendor specific attributes */
 #define PA_VS_CONFIG_REG1	0x9000
 #define DME_VS_CORE_CLK_CTRL	0xD002

@@ -160,8 +147,7 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
 
 static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
 {
-	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
-		    REG_UFS_CFG1);
+	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
 
 	/*
 	 * Make sure assertion of ufs phy reset is written to

@@ -172,8 +158,7 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
 
 static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
 {
-	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
-		    REG_UFS_CFG1);
+	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
 
 	/*
 	 * Make sure de-assertion of ufs phy reset is written to

@@ -197,28 +182,11 @@ struct ufs_qcom_testbus {
 struct gpio_desc;
 
 struct ufs_qcom_host {
-	/*
-	 * Set this capability if host controller supports the QUniPro mode
-	 * and if driver wants the Host controller to operate in QUniPro mode.
-	 * Note: By default this capability will be kept enabled if host
-	 * controller supports the QUniPro mode.
-	 */
-	#define UFS_QCOM_CAP_QUNIPRO	0x1
-
-	/*
-	 * Set this capability if host controller can retain the secure
-	 * configuration even after UFS controller core power collapse.
-	 */
-	#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE	0x2
-	u32 caps;
-
 	struct phy *generic_phy;
 	struct ufs_hba *hba;
 	struct ufs_pa_layer_attr dev_req_params;
-	struct clk *rx_l0_sync_clk;
-	struct clk *tx_l0_sync_clk;
-	struct clk *rx_l1_sync_clk;
-	struct clk *tx_l1_sync_clk;
+	struct clk_bulk_data *clks;
+	u32 num_clks;
 	bool is_lane_clks_enabled;
 
 	struct icc_path *icc_ddr;

@@ -264,9 +232,4 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
 
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
 
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
-	return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
 #endif /* UFS_QCOM_H_ */
include/ufs/ufshcd.h:

@@ -1236,6 +1236,8 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
 	ufshcd_writel(hba, tmp, reg);
 }
 
+void ufshcd_enable_irq(struct ufs_hba *hba);
+void ufshcd_disable_irq(struct ufs_hba *hba);
 int ufshcd_alloc_host(struct device *, struct ufs_hba **);
 void ufshcd_dealloc_host(struct ufs_hba *);
 int ufshcd_hba_enable(struct ufs_hba *hba);