mirror of https://github.com/torvalds/linux.git
a39f841a5b
In the current driver for CoreSight components, two features of PTM components are missing:

1. **Branch Broadcast**: also present in ETM components, where it is called Branch Output. It makes the addresses of direct branches available in the trace alongside the indirect branch addresses, which is useful, for example, when tracing code whose source is not available.
2. **Return Stack** (only present in PTM v1.0 and PTM v1.1): records the return addresses of function calls. It can help prevent CRA (Code Reuse Attacks) when used to maintain a shadow stack.

Both features can be enabled simply through the `mode` field of the `etm_config` struct.

Signed-off-by: Muhammad Abdul Wahab <muhammadabdul.wahab@centralesupelec.fr>
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
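For context, the `mode` sysfs attribute implemented in this file (see `mode_store()` below) is what exposes these bits to user space. The following is a minimal user-space sketch of how the two features might be switched on. It assumes `ETM_MODE_BBROAD` and `ETM_MODE_RET_STACK` occupy bits 5 and 6 of the mode word and that the device shows up as `ptm0`; the authoritative bit definitions live in `coresight-etm.h`, and the actual device name under `/sys/bus/coresight/devices/` is platform dependent.

```c
/*
 * Minimal user-space sketch (not part of this file): set the branch-broadcast
 * and return-stack bits in the "mode" sysfs attribute handled by mode_store()
 * below.  The device name "ptm0" and the bit positions 5 and 6 (mirroring
 * ETM_MODE_BBROAD / ETM_MODE_RET_STACK) are assumptions for illustration;
 * check coresight-etm.h and /sys/bus/coresight/devices/ on your platform.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODE_BBROAD	(1UL << 5)	/* assumed value of ETM_MODE_BBROAD */
#define MODE_RET_STACK	(1UL << 6)	/* assumed value of ETM_MODE_RET_STACK */

int main(void)
{
	const char *path = "/sys/bus/coresight/devices/ptm0/mode";
	unsigned long mode = 0;
	FILE *f;

	/* Read the current mode (printed by mode_show() as "%#lx"). */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lx", &mode) != 1) {
		perror(path);
		return EXIT_FAILURE;
	}
	fclose(f);

	/* Turn on branch broadcast and the return stack, keep everything else. */
	mode |= MODE_BBROAD | MODE_RET_STACK;

	/* mode_store() parses the value with kstrtoul(buf, 16, ...). */
	f = fopen(path, "w");
	if (!f || fprintf(f, "%lx\n", mode) < 0) {
		perror(path);
		return EXIT_FAILURE;
	}
	fclose(f);

	printf("mode is now %#lx\n", mode);
	return EXIT_SUCCESS;
}
```

Writing the combined value back is what ultimately sets `ETMCR_BRANCH_BROADCAST` and `ETMCR_RETURN_STACK` in `mode_store()` below.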
1277 lines
32 KiB
C
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
#include "coresight-priv.h"

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		memset(config, 0, sizeof(struct etm_config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
		}

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

	if (config->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else
		config->ctrl &= ~ETMCR_STALL_MODE;

	if (config->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	if (config->mode & ETM_MODE_BBROAD)
		config->ctrl |= ETMCR_BRANCH_BROADCAST;
	else
		config->ctrl &= ~ETMCR_BRANCH_BROADCAST;

	if (config->mode & ETM_MODE_RET_STACK)
		config->ctrl |= ETMCR_RETURN_STACK;
	else
		config->ctrl &= ~ETMCR_RETURN_STACK;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);

static ssize_t trigger_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->trigger_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->enable_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->fifofull_level = val;

	return size;
}
static DEVICE_ATTR_RW(fifofull_level);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = config->addr_val[idx];
	val2 = config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= (1 << (idx/2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->startstop_ctrl |= (1 << idx);
	config->enable_ctrl1 |= BIT(25);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->startstop_ctrl |= (1 << (idx + 16));
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->addr_acctype[config->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->addr_acctype[config->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf, "counter %d: %x\n",
				       i, config->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	config->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_vpid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long vpid, pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &vpid);
	if (ret)
		return ret;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	config->ctxid_vpid[config->ctxid_idx] = vpid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = etm_get_trace_id(drvdata);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);

static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};

#define coresight_etm3x_simple_func(name, offset)			\
	coresight_simple_func(struct etm_drvdata, NULL, name, offset)

coresight_etm3x_simple_func(etmccr, ETMCCR);
coresight_etm3x_simple_func(etmccer, ETMCCER);
coresight_etm3x_simple_func(etmscr, ETMSCR);
coresight_etm3x_simple_func(etmidr, ETMIDR);
coresight_etm3x_simple_func(etmcr, ETMCR);
coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
coresight_etm3x_simple_func(etmtecr2, ETMTECR2);

static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};