// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX	6
#else
#define TB_MAX_RETIMER_INDEX	2
#endif

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
                        size_t size)
{
        return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

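/*
 * NVMem read callback registered for the active NVM device through
 * tb_nvm_add_active(). Takes a runtime PM reference on the retimer and
 * the domain lock (rt->tb->lock) before reading from the hardware.
 */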
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = tb_retimer_nvm_read(rt, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

out:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        return ret;
}

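/*
 * NVMem write callback registered for the non-active NVM device through
 * tb_nvm_add_non_active(). The data is only buffered here; it is flushed
 * to the retimer when nvm_authenticate is written.
 */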
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret = 0;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

        return ret;
}

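/*
 * Allocates the NVM structure for the retimer, reads the current NVM
 * version and registers the active and non-active NVMem devices. If NVM
 * operations are not supported the retimer is still added but NVM
 * upgrade stays disabled.
 */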
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
        struct tb_nvm *nvm;
        int ret;

        nvm = tb_nvm_alloc(&rt->dev);
        if (IS_ERR(nvm)) {
                ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
                goto err_nvm;
        }

        ret = tb_nvm_read_version(nvm);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_active(nvm, nvm_read);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_non_active(nvm, nvm_write);
        if (ret)
                goto err_nvm;

        rt->nvm = nvm;
        dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
        return 0;

err_nvm:
        dev_dbg(&rt->dev, "NVM upgrade disabled\n");
        if (!IS_ERR(nvm))
                tb_nvm_free(nvm);

        return ret;
}

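/*
 * Validates the buffered NVM image and writes it to the non-active NVM
 * of the retimer. On success the image is marked as flushed so that it
 * is not written again before authentication.
 */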
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
        unsigned int image_size;
        const u8 *buf;
        int ret;

        ret = tb_nvm_validate(rt->nvm);
        if (ret)
                return ret;

        buf = rt->nvm->buf_data_start;
        image_size = rt->nvm->buf_data_size;

        ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
                                          image_size);
        if (ret)
                return ret;

        rt->nvm->flushed = true;
        return 0;
}

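/*
 * Starts NVM authentication for the retimer. If @auth_only is set, only
 * the previously written image is authenticated (the NVM write offset is
 * set back to 0 first). The retimer typically becomes inaccessible while
 * it authenticates, so a failing status read afterwards is treated as
 * success.
 */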
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
        u32 status;
        int ret;

        if (auth_only) {
                ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
                if (ret)
                        return ret;
        }

        ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
        if (ret)
                return ret;

        usleep_range(100, 150);

        /*
         * Check the status now if we still can access the retimer. It
         * is expected that the call below fails.
         */
        ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
                                                        &status);
        if (!ret) {
                rt->auth_status = status;
                return status ? -EINVAL : 0;
        }

        return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

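/*
 * Reports the result of the last NVM authentication. Returns -EAGAIN if
 * the NVM has not been added yet and -EOPNOTSUPP if NVM upgrade is not
 * supported for this retimer.
 */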
static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else if (rt->no_nvm_upgrade)
                ret = -EOPNOTSUPP;
        else
                ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

        mutex_unlock(&rt->tb->lock);

        return ret;
}

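/*
 * Reads the NVM authentication status of every possible retimer index
 * into @status. Stops at the first index for which the read fails.
 */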
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
        int i;

        tb_port_dbg(port, "reading NVM authentication status of retimers\n");

        /*
         * Before doing anything else, read the authentication status.
         * If the retimer has it set, store it for the new retimer
         * device instance.
         */
        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
                if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
                        break;
        }
}

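/*
 * Enables inbound sideband transactions towards all possible retimer
 * indices. Only needed when the USB4 port is offline; when it is online
 * the sideband is already up.
 */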
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
        int i;

        /*
         * When the USB4 port is online, sideband communications are
         * already up.
         */
        if (!usb4_port_device_is_offline(port->usb4))
                return;

        tb_port_dbg(port, "enabling sideband transactions\n");

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
        int i;

        /*
         * When the USB4 port is offline, we need to keep the sideband
         * communications up to make it possible to communicate with
         * the connected retimers.
         */
        if (usb4_port_device_is_offline(port->usb4))
                return;

        tb_port_dbg(port, "disabling sideband transactions\n");

        for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
                if (usb4_port_retimer_unset_inbound_sbtx(port, i))
                        break;
        }
}

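/*
 * Triggers NVM upgrade and/or authentication for the retimer. The value
 * written selects the operation: AUTHENTICATE_ONLY, WRITE_ONLY or
 * WRITE_AND_AUTHENTICATE (constants used below, defined outside this
 * file). Writing 0 only clears the stored authentication status.
 */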
static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int val, ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto exit_rpm;
        }

        if (!rt->nvm) {
                ret = -EAGAIN;
                goto exit_unlock;
        }

        ret = kstrtoint(buf, 10, &val);
        if (ret)
                goto exit_unlock;

        /* Always clear status */
        rt->auth_status = 0;

        if (val) {
                /*
                 * When NVM authentication starts, the retimer is not
                 * accessible, so calling tb_retimer_unset_inbound_sbtx()
                 * would fail and therefore we do not call it. The
                 * exception is when validation fails or we only write
                 * the new NVM image without authentication.
                 */
                tb_retimer_set_inbound_sbtx(rt->port);
                if (val == AUTHENTICATE_ONLY) {
                        ret = tb_retimer_nvm_authenticate(rt, true);
                } else {
                        if (!rt->nvm->flushed) {
                                if (!rt->nvm->buf) {
                                        ret = -EINVAL;
                                        goto exit_unlock;
                                }

                                ret = tb_retimer_nvm_validate_and_write(rt);
                                if (ret || val == WRITE_ONLY)
                                        goto exit_unlock;
                        }
                        if (val == WRITE_AND_AUTHENTICATE)
                                ret = tb_retimer_nvm_authenticate(rt, false);
                }
        }

exit_unlock:
        if (ret || val == WRITE_ONLY)
                tb_retimer_unset_inbound_sbtx(rt->port);

        mutex_unlock(&rt->tb->lock);
exit_rpm:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else if (rt->no_nvm_upgrade)
                ret = -EOPNOTSUPP;
        else
                ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

        mutex_unlock(&rt->tb->lock);
        return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_nvm_authenticate.attr,
        &dev_attr_nvm_version.attr,
        &dev_attr_vendor.attr,
        NULL
};

static const struct attribute_group retimer_group = {
        .attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
        &retimer_group,
        NULL
};

static void tb_retimer_release(struct device *dev)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        kfree(rt);
}

const struct device_type tb_retimer_type = {
        .name = "thunderbolt_retimer",
        .groups = retimer_groups,
        .release = tb_retimer_release,
};

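/*
 * Probes the retimer at the given index behind the USB4 port: reads its
 * vendor and product IDs over the sideband, registers it as a device on
 * the Thunderbolt bus, adds its NVM devices and enables runtime PM.
 * @on_board tells whether the retimer sits on this board; NVM upgrade is
 * only supported for on-board retimers.
 */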
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
                          bool on_board)
{
        struct tb_retimer *rt;
        u32 vendor, device;
        int ret;

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
                return ret;
        }

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_PRODUCT_ID, &device, sizeof(device));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
                return ret;
        }

        rt = kzalloc(sizeof(*rt), GFP_KERNEL);
        if (!rt)
                return -ENOMEM;

        rt->index = index;
        rt->vendor = vendor;
        rt->device = device;
        rt->auth_status = auth_status;
        rt->port = port;
        rt->tb = port->sw->tb;

        /*
         * Only support NVM upgrade for on-board retimers. The retimers
         * on the other side of the connection are not upgradable from
         * here.
         */
        if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
                rt->no_nvm_upgrade = true;

        rt->dev.parent = &port->usb4->dev;
        rt->dev.bus = &tb_bus_type;
        rt->dev.type = &tb_retimer_type;
        dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
                     port->port, index);

        ret = device_register(&rt->dev);
        if (ret) {
                dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
                put_device(&rt->dev);
                return ret;
        }

        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
                device_unregister(&rt->dev);
                return ret;
        }

        dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
                 rt->vendor, rt->device);

        pm_runtime_no_callbacks(&rt->dev);
        pm_runtime_set_active(&rt->dev);
        pm_runtime_enable(&rt->dev);
        pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_use_autosuspend(&rt->dev);

        tb_retimer_debugfs_init(rt);
        return 0;
}

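/* Tears down a single retimer: debugfs entries, NVM devices and the device itself. */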
static void tb_retimer_remove(struct tb_retimer *rt)
{
        dev_info(&rt->dev, "retimer disconnected\n");
        tb_retimer_debugfs_remove(rt);
        tb_nvm_free(rt->nvm);
        device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
        const struct tb_port *port;
        u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
        const struct tb_retimer_lookup *lookup = data;
        struct tb_retimer *rt = tb_to_retimer(dev);

        return rt && rt->port == lookup->port && rt->index == lookup->index;
}

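/*
 * Looks up an already registered retimer by port and index. The caller
 * owns the device reference taken by device_find_child() and must drop
 * it with put_device() when done.
 */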
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
        struct tb_retimer_lookup lookup = { .port = port, .index = index };
        struct device *dev;

        dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
        if (dev)
                return tb_to_retimer(dev);

        return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, max, last_idx = 0;

        /*
         * Send broadcast RT to make sure retimer indices facing this
         * port are set.
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
                return ret;

        /*
         * Immediately after sending enumerate retimers read the
         * authentication status of each retimer.
         */
        tb_retimer_nvm_authenticate_status(port, status);

        /*
         * Enable sideband channel for each retimer. We can do this
         * regardless of whether there is a device connected or not.
         */
        tb_retimer_set_inbound_sbtx(port);

        for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
                /*
                 * Last retimer is true only for the last on-board
                 * retimer (the one connected directly to the Type-C
                 * port).
                 */
                ret = usb4_port_retimer_is_last(port, i);
                if (ret > 0)
                        last_idx = i;
                else if (ret < 0)
                        break;

                max = i;
        }

        ret = 0;
        if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
                max = min(last_idx, max);

        /* Add retimers if they do not exist already */
        for (i = 1; i <= max; i++) {
                struct tb_retimer *rt;

                /* Skip cable retimers */
                if (usb4_port_retimer_is_cable(port, i))
                        continue;

                rt = tb_port_find_retimer(port, i);
                if (rt) {
                        put_device(&rt->dev);
                } else if (add) {
                        ret = tb_retimer_add(port, i, status[i], i <= last_idx);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                }
        }

        tb_retimer_unset_inbound_sbtx(port);
        return ret;
}

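/*
 * device_for_each_child_reverse() callback: removes the retimer if it
 * belongs to the given port.
 */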
static int remove_retimer(struct device *dev, void *data)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        struct tb_port *port = data;

        if (rt && rt->port == port)
                tb_retimer_remove(rt);
        return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
        struct usb4_port *usb4;

        usb4 = port->usb4;
        if (usb4)
                device_for_each_child_reverse(&usb4->dev, port,
                                              remove_retimer);
}