linux/drivers/nvmem/core.c

// SPDX-License-Identifier: GPL-2.0
/*
* nvmem framework core.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "internals.h"
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
#define FLAG_COMPAT BIT(0)
struct nvmem_cell_entry {
const char *name;
int offset;
size_t raw_len;
int bytes;
int bit_offset;
int nbits;
nvmem_cell_post_process_t read_post_process;
void *priv;
struct device_node *np;
struct nvmem_device *nvmem;
struct list_head node;
};
struct nvmem_cell {
struct nvmem_cell_entry *entry;
const char *id;
int index;
};
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (nvmem->reg_read)
return nvmem->reg_read(nvmem->priv, offset, val, bytes);
return -EINVAL;
}
static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
int ret;
if (nvmem->reg_write) {
gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
return ret;
}
return -EINVAL;
}
static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
unsigned int offset, void *val,
size_t bytes, int write)
{
unsigned int end = offset + bytes;
unsigned int kend, ksize;
const struct nvmem_keepout *keepout = nvmem->keepout;
const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
int rc;
/*
* Skip all keepouts before the range being accessed.
* Keepouts are sorted.
*/
while ((keepout < keepoutend) && (keepout->end <= offset))
keepout++;
while ((offset < end) && (keepout < keepoutend)) {
/* Access the valid portion before the keepout. */
if (offset < keepout->start) {
kend = min(end, keepout->start);
ksize = kend - offset;
if (write)
rc = __nvmem_reg_write(nvmem, offset, val, ksize);
else
rc = __nvmem_reg_read(nvmem, offset, val, ksize);
if (rc)
return rc;
offset += ksize;
val += ksize;
}
/*
* Now we're aligned to the start of this keepout zone. Go
* through it.
*/
kend = min(end, keepout->end);
ksize = kend - offset;
if (!write)
memset(val, keepout->value, ksize);
val += ksize;
offset += ksize;
keepout++;
}
/*
* If we ran out of keepouts but there's still stuff to do, send it
* down directly
*/
if (offset < end) {
ksize = end - offset;
if (write)
return __nvmem_reg_write(nvmem, offset, val, ksize);
else
return __nvmem_reg_read(nvmem, offset, val, ksize);
}
return 0;
}
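/*
 * Example (not part of the original file): a minimal sketch of how a
 * provider could describe keepout regions in its struct nvmem_config.
 * The offsets and the "foo" prefix are hypothetical; struct nvmem_keepout
 * and the .keepout/.nkeepout config fields are the provider API consumed
 * here. Regions must be sorted, non-overlapping and stride-aligned (see
 * nvmem_validate_keepouts() below).
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *		{ .start = 0x40, .end = 0x50, .value = 0x00 },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 *
 * Reads crossing a keepout return .value for the kept-out bytes and writes
 * skip them, as implemented in nvmem_access_with_keepouts() above.
 */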
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (!nvmem->nkeepout)
return __nvmem_reg_read(nvmem, offset, val, bytes);
return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
if (!nvmem->nkeepout)
return __nvmem_reg_write(nvmem, offset, val, bytes);
return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
[NVMEM_TYPE_UNKNOWN] = "Unknown",
[NVMEM_TYPE_EEPROM] = "EEPROM",
[NVMEM_TYPE_OTP] = "OTP",
[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
[NVMEM_TYPE_FRAM] = "FRAM",
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}
static DEVICE_ATTR_RO(type);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
return sysfs_emit(buf, "%d\n", nvmem->read_only);
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
int ret = kstrtobool(buf, &nvmem->read_only);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(force_ro);
static struct attribute *nvmem_attrs[] = {
&dev_attr_force_ro.attr,
&dev_attr_type.attr,
NULL,
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
struct nvmem_device *nvmem;
int rc;
if (attr->private)
dev = attr->private;
else
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_read)
return -EPERM;
rc = nvmem_reg_read(nvmem, pos, buf, count);
if (rc)
return rc;
return count;
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
struct nvmem_device *nvmem;
int rc;
if (attr->private)
dev = attr->private;
else
dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
if (!IS_ALIGNED(pos, nvmem->stride))
return -EINVAL;
if (count < nvmem->word_size)
return -EINVAL;
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_write)
return -EPERM;
rc = nvmem_reg_write(nvmem, pos, buf, count);
if (rc)
return rc;
return count;
}
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
umode_t mode = 0400;
if (!nvmem->root_only)
mode |= 0044;
if (!nvmem->read_only)
mode |= 0200;
if (!nvmem->reg_write)
mode &= ~0200;
if (!nvmem->reg_read)
mode &= ~0444;
return mode;
}
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
attr->size = nvmem->size;
return nvmem_bin_attr_get_umode(nvmem);
}
static umode_t nvmem_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
/*
* If the device has no .reg_write operation, do not allow
* configuration as read-write.
* If the device is set as read-only by configuration, it
* can be forced into read-write mode using the 'force_ro'
* attribute.
*/
if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
return 0; /* Attribute not visible */
return attr->mode;
}
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct nvmem_cell_entry *entry;
struct nvmem_cell *cell = NULL;
size_t cell_sz, read_len;
void *content;
entry = attr->private;
cell = nvmem_create_cell(entry, entry->name, 0);
if (IS_ERR(cell))
return PTR_ERR(cell);
if (!cell)
return -EINVAL;
content = nvmem_cell_read(cell, &cell_sz);
if (IS_ERR(content)) {
read_len = PTR_ERR(content);
goto destroy_cell;
}
read_len = min_t(unsigned int, cell_sz - pos, count);
memcpy(buf, content + pos, read_len);
kfree(content);
destroy_cell:
kfree_const(cell->id);
kfree(cell);
return read_len;
}
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
.mode = 0644,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
static struct bin_attribute *nvmem_bin_attributes[] = {
&bin_attr_rw_nvmem,
NULL,
};
static const struct attribute_group nvmem_bin_group = {
.bin_attrs = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
.is_visible = nvmem_attr_is_visible,
};
static const struct attribute_group *nvmem_dev_groups[] = {
&nvmem_bin_group,
NULL,
};
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
/*
* nvmem_setup_compat() - Create an additional binary entry in
* drivers sys directory, to be backwards compatible with the older
* drivers/misc/eeprom drivers.
*/
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
int rval;
if (!config->compat)
return 0;
if (!config->base_dev)
return -EINVAL;
nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
if (config->type == NVMEM_TYPE_FRAM)
nvmem->eeprom.attr.name = "fram";
nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
nvmem->eeprom.private = &nvmem->dev;
nvmem->base_dev = config->base_dev;
rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
if (rval) {
dev_err(&nvmem->dev,
"Failed to create eeprom binary file %d\n", rval);
return rval;
}
nvmem->flags |= FLAG_COMPAT;
return 0;
}
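/*
 * Example (not part of the original file): a provider that used to live
 * under drivers/misc/eeprom can keep its legacy "eeprom" sysfs node by
 * setting the compat fields of its nvmem_config before registering. The
 * "client" device below is hypothetical; .compat and .base_dev are the
 * fields consumed by nvmem_sysfs_setup_compat().
 *
 *	config.compat = true;
 *	config.base_dev = &client->dev;
 *	nvmem = devm_nvmem_register(&client->dev, &config);
 */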
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
if (config->compat)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
struct attribute_group group = {
.name = "cells",
};
struct nvmem_cell_entry *entry;
struct bin_attribute *attrs;
unsigned int ncells = 0, i = 0;
int ret = 0;
mutex_lock(&nvmem_mutex);
if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
goto unlock_mutex;
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
sizeof(struct bin_attribute *), GFP_KERNEL);
if (!group.bin_attrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
if (!attrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
/* Initialize each attribute to take the name and size of the cell */
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
"%s@%x,%x", entry->name,
entry->offset,
entry->bit_offset);
attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
attrs[i].private = entry;
if (!attrs[i].attr.name) {
ret = -ENOMEM;
goto unlock_mutex;
}
group.bin_attrs[i] = &attrs[i];
i++;
}
ret = device_add_group(&nvmem->dev, &group);
if (ret)
goto unlock_mutex;
nvmem->sysfs_cells_populated = true;
unlock_mutex:
mutex_unlock(&nvmem_mutex);
return ret;
}
#else /* CONFIG_NVMEM_SYSFS */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
const struct nvmem_config *config)
{
}
#endif /* CONFIG_NVMEM_SYSFS */
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
ida_free(&nvmem_ida, nvmem->id);
gpiod_put(nvmem->wp_gpio);
kfree(nvmem);
}
static const struct device_type nvmem_provider_type = {
.release = nvmem_release,
};
static struct bus_type nvmem_bus_type = {
.name = "nvmem",
};
static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
mutex_lock(&nvmem_mutex);
list_del(&cell->node);
mutex_unlock(&nvmem_mutex);
of_node_put(cell->np);
kfree_const(cell->name);
kfree(cell);
}
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
struct nvmem_cell_entry *cell, *p;
list_for_each_entry_safe(cell, p, &nvmem->cells, node)
nvmem_cell_entry_drop(cell);
}
static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
mutex_lock(&nvmem_mutex);
list_add_tail(&cell->node, &cell->nvmem->cells);
mutex_unlock(&nvmem_mutex);
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
struct nvmem_cell_entry *cell)
{
cell->nvmem = nvmem;
cell->offset = info->offset;
cell->raw_len = info->raw_len ?: info->bytes;
cell->bytes = info->bytes;
cell->name = info->name;
cell->read_post_process = info->read_post_process;
cell->priv = info->priv;
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
cell->np = info->np;
if (cell->nbits)
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
BITS_PER_BYTE);
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev,
"cell %s unaligned to nvmem stride %d\n",
cell->name ?: "<unknown>", nvmem->stride);
return -EINVAL;
}
return 0;
}
static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
struct nvmem_cell_entry *cell)
{
int err;
err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
if (err)
return err;
cell->name = kstrdup_const(info->name, GFP_KERNEL);
if (!cell->name)
return -ENOMEM;
return 0;
}
/**
* nvmem_add_one_cell() - Add one cell information to an nvmem device
*
* @nvmem: nvmem device to add cells to.
* @info: nvmem cell info to add to the device
*
* Return: 0 or negative error code on failure.
*/
int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info)
{
struct nvmem_cell_entry *cell;
int rval;
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell)
return -ENOMEM;
rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
if (rval) {
kfree(cell);
return rval;
}
nvmem_cell_entry_add(cell);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
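/*
 * Example (not part of the original file): a provider or layout driver
 * describing a single cell and handing it to nvmem_add_one_cell(). The
 * name, offset and size are made up for this sketch; the nvmem_cell_info
 * fields are the ones consumed above.
 *
 *	struct nvmem_cell_info info = {
 *		.name   = "mac-address",
 *		.offset = 0x24,
 *		.bytes  = 6,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 */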
/**
* nvmem_add_cells() - Add cell information to an nvmem device
*
* @nvmem: nvmem device to add cells to.
* @info: nvmem cell info to add to the device
* @ncells: number of cells in info
*
* Return: 0 or negative error code on failure.
*/
static int nvmem_add_cells(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
int ncells)
{
int i, rval;
for (i = 0; i < ncells; i++) {
rval = nvmem_add_one_cell(nvmem, &info[i]);
if (rval)
return rval;
}
return 0;
}
/**
* nvmem_register_notifier() - Register a notifier block for nvmem events.
*
* @nb: notifier block to be called on nvmem events.
*
* Return: 0 on success, negative error number on failure.
*/
int nvmem_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
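/*
 * Example (not part of the original file): a consumer watching for cell
 * additions. The callback receives one of the NVMEM_* events from
 * nvmem-consumer.h; for cell events, data points at the affected entry.
 * The "foo" names are hypothetical.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_CELL_ADD)
 *			pr_debug("nvmem cell added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */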
/**
* nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
*
* @nb: notifier block to be unregistered.
*
* Return: 0 on success, negative error number on failure.
*/
int nvmem_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
const struct nvmem_cell_info *info;
struct nvmem_cell_table *table;
struct nvmem_cell_entry *cell;
int rval = 0, i;
mutex_lock(&nvmem_cell_mutex);
list_for_each_entry(table, &nvmem_cell_tables, node) {
if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
for (i = 0; i < table->ncells; i++) {
info = &table->cells[i];
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell) {
rval = -ENOMEM;
goto out;
}
rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
if (rval) {
kfree(cell);
goto out;
}
nvmem_cell_entry_add(cell);
}
}
}
out:
mutex_unlock(&nvmem_cell_mutex);
return rval;
}
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
struct nvmem_cell_entry *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
list_for_each_entry(iter, &nvmem->cells, node) {
if (strcmp(cell_id, iter->name) == 0) {
cell = iter;
break;
}
}
mutex_unlock(&nvmem_mutex);
return cell;
}
static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
unsigned int cur = 0;
const struct nvmem_keepout *keepout = nvmem->keepout;
const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
while (keepout < keepoutend) {
/* Ensure keepouts are sorted and don't overlap. */
if (keepout->start < cur) {
dev_err(&nvmem->dev,
"Keepout regions aren't sorted or overlap.\n");
return -ERANGE;
}
if (keepout->end < keepout->start) {
dev_err(&nvmem->dev,
"Invalid keepout region.\n");
return -EINVAL;
}
/*
* Validate keepouts (and holes between) don't violate
* word_size constraints.
*/
if ((keepout->end - keepout->start < nvmem->word_size) ||
((keepout->start != cur) &&
(keepout->start - cur < nvmem->word_size))) {
dev_err(&nvmem->dev,
"Keepout regions violate word_size constraints.\n");
return -ERANGE;
}
/* Validate keepouts don't violate stride (alignment). */
if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
!IS_ALIGNED(keepout->end, nvmem->stride)) {
dev_err(&nvmem->dev,
"Keepout regions violate stride.\n");
return -EINVAL;
}
cur = keepout->end;
keepout++;
}
return 0;
}
static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
struct device *dev = &nvmem->dev;
struct device_node *child;
const __be32 *addr;
int len, ret;
for_each_child_of_node(np, child) {
struct nvmem_cell_info info = {0};
addr = of_get_property(child, "reg", &len);
if (!addr)
continue;
if (len < 2 * sizeof(u32)) {
dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
of_node_put(child);
return -EINVAL;
}
info.offset = be32_to_cpup(addr++);
info.bytes = be32_to_cpup(addr);
info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
addr = of_get_property(child, "bits", &len);
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
of_node_put(child);
return -EINVAL;
}
}
info.np = of_node_get(child);
if (nvmem->fixup_dt_cell_info)
nvmem->fixup_dt_cell_info(nvmem, &info);
ret = nvmem_add_one_cell(nvmem, &info);
kfree(info.name);
if (ret) {
of_node_put(child);
return ret;
}
}
return 0;
}
static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}
static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
struct device_node *layout_np;
int err = 0;
layout_np = of_nvmem_layout_get_container(nvmem);
if (!layout_np)
return 0;
if (of_device_is_compatible(layout_np, "fixed-layout"))
err = nvmem_add_cells_from_dt(nvmem, layout_np);
of_node_put(layout_np);
return err;
}
int nvmem_layout_register(struct nvmem_layout *layout)
{
int ret;
if (!layout->add_cells)
return -EINVAL;
/* Populate the cells */
ret = layout->add_cells(layout);
if (ret)
return ret;
#ifdef CONFIG_NVMEM_SYSFS
ret = nvmem_populate_sysfs_cells(layout->nvmem);
if (ret) {
nvmem_device_remove_all_cells(layout->nvmem);
return ret;
}
#endif
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);
void nvmem_layout_unregister(struct nvmem_layout *layout)
{
/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
/**
* nvmem_register() - Register an nvmem device for the given nvmem_config.
* Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @config: nvmem device configuration with which nvmem device is created.
*
* Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
* on success.
*/
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
int rval;
if (!config->dev)
return ERR_PTR(-EINVAL);
if (!config->reg_read && !config->reg_write)
return ERR_PTR(-EINVAL);
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
if (rval < 0) {
kfree(nvmem);
return ERR_PTR(rval);
}
nvmem->id = rval;
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
device_initialize(&nvmem->dev);
if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
rval = PTR_ERR(nvmem->wp_gpio);
nvmem->wp_gpio = NULL;
goto err_put_device;
}
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride ?: 1;
nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
nvmem->reg_read = config->reg_read;
nvmem->reg_write = config->reg_write;
nvmem->keepout = config->keepout;
nvmem->nkeepout = config->nkeepout;
if (config->of_node)
nvmem->dev.of_node = config->of_node;
else
nvmem->dev.of_node = config->dev->of_node;
switch (config->id) {
case NVMEM_DEVID_NONE:
rval = dev_set_name(&nvmem->dev, "%s", config->name);
break;
case NVMEM_DEVID_AUTO:
rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
break;
default:
rval = dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
break;
}
if (rval)
goto err_put_device;
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
#ifdef CONFIG_NVMEM_SYSFS
nvmem->dev.groups = nvmem_dev_groups;
#endif
if (nvmem->nkeepout) {
rval = nvmem_validate_keepouts(nvmem);
if (rval)
goto err_put_device;
}
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
goto err_put_device;
}
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
if (rval)
goto err_remove_cells;
if (config->add_legacy_fixed_of_cells) {
rval = nvmem_add_cells_from_legacy_of(nvmem);
if (rval)
goto err_remove_cells;
}
rval = nvmem_add_cells_from_fixed_layout(nvmem);
if (rval)
goto err_remove_cells;
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_add(&nvmem->dev);
if (rval)
goto err_remove_cells;
rval = nvmem_populate_layout(nvmem);
if (rval)
goto err_remove_dev;
#ifdef CONFIG_NVMEM_SYSFS
rval = nvmem_populate_sysfs_cells(nvmem);
if (rval)
goto err_destroy_layout;
#endif
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
device_del(&nvmem->dev);
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
put_device(&nvmem->dev);
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
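/*
 * Illustrative sketch, not part of the framework: a minimal provider fills
 * a struct nvmem_config and registers it. Every name below (foo_*,
 * "foo-eeprom", the 256-byte size, foo_chip_init()/foo_chip_read()) is a
 * hypothetical placeholder assuming an EEPROM-like backend;
 * devm_nvmem_register() (further below) ties the lifetime to the
 * providing device.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_chip *chip = priv;
 *
 *		return foo_chip_read(chip, offset, val, bytes);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_chip *chip = foo_chip_init(pdev);
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-eeprom",
 *			.id = NVMEM_DEVID_AUTO,
 *			.size = 256,
 *			.word_size = 1,
 *			.stride = 1,
 *			.reg_read = foo_reg_read,
 *			.priv = chip,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
 *	}
 */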
static void nvmem_device_release(struct kref *kref)
{
struct nvmem_device *nvmem;
nvmem = container_of(kref, struct nvmem_device, refcnt);
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
if (nvmem->flags & FLAG_COMPAT)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
nvmem_device_remove_all_cells(nvmem);
nvmem_destroy_layout(nvmem);
device_unregister(&nvmem->dev);
}
/**
* nvmem_unregister() - Unregister previously registered nvmem device
*
* @nvmem: Pointer to previously registered nvmem device.
*/
void nvmem_unregister(struct nvmem_device *nvmem)
{
if (nvmem)
kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_unregister(void *nvmem)
{
nvmem_unregister(nvmem);
}
/**
* devm_nvmem_register() - Register a managed nvmem device for the given
* nvmem_config.
* Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @dev: Device that uses the nvmem device.
* @config: nvmem device configuration with which nvmem device is created.
*
* Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
* on success.
*/
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
int ret;
nvmem = nvmem_register(config);
if (IS_ERR(nvmem))
return nvmem;
ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
if (ret)
return ERR_PTR(ret);
return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
static struct nvmem_device *__nvmem_device_get(void *data,
int (*match)(struct device *dev, const void *data))
{
struct nvmem_device *nvmem = NULL;
struct device *dev;
mutex_lock(&nvmem_mutex);
dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
if (dev)
nvmem = to_nvmem_device(dev);
mutex_unlock(&nvmem_mutex);
if (!nvmem)
return ERR_PTR(-EPROBE_DEFER);
if (!try_module_get(nvmem->owner)) {
dev_err(&nvmem->dev,
"could not increase module refcount for cell %s\n",
nvmem_dev_name(nvmem));
put_device(&nvmem->dev);
return ERR_PTR(-EINVAL);
}
kref_get(&nvmem->refcnt);
return nvmem;
}
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
put_device(&nvmem->dev);
module_put(nvmem->owner);
kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
* @np: Device tree node that uses the nvmem device.
* @id: nvmem name from nvmem-names property.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
struct nvmem_device *nvmem;
int index = 0;
if (id)
index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index);
if (!nvmem_np)
return ERR_PTR(-ENOENT);
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
/**
* nvmem_device_get() - Get nvmem device from a given id
*
* @dev: Device that uses the nvmem device.
* @dev_name: name of the requested nvmem device.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
if (dev->of_node) { /* try dt first */
struct nvmem_device *nvmem;
nvmem = of_nvmem_device_get(dev->of_node, dev_name);
if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
return nvmem;
}
return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
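/*
 * Illustrative sketch (assumption, not taken from this file): a consumer
 * that needs raw access to a whole nvmem device pairs nvmem_device_get()
 * with nvmem_device_read() and nvmem_device_put(). The "foo-eeprom" id and
 * the 16-byte read are hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 buf[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "foo-eeprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 */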
/**
* nvmem_device_find() - Find nvmem device with matching function
*
* @data: Data to pass to match function
* @match: Callback function to check device
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success.
*/
struct nvmem_device *nvmem_device_find(void *data,
int (*match)(struct device *dev, const void *data))
{
return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
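/*
 * Illustrative sketch (assumption): the match callback passed to
 * nvmem_device_find() follows the bus_find_device() convention used above.
 * Matching an nvmem device by its parent, with hypothetical names:
 *
 *	static int foo_match_parent(struct device *dev, const void *data)
 *	{
 *		return dev->parent == data;
 *	}
 *
 *	nvmem = nvmem_device_find(parent_dev, foo_match_parent);
 */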
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
struct nvmem_device **nvmem = res;
if (WARN_ON(!nvmem || !*nvmem))
return 0;
return *nvmem == data;
}
static void devm_nvmem_device_release(struct device *dev, void *res)
{
nvmem_device_put(*(struct nvmem_device **)res);
}
/**
* devm_nvmem_device_put() - put an already-obtained nvmem device
*
* @dev: Device that uses the nvmem device.
* @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
* that needs to be released.
*/
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
int ret;
ret = devres_release(dev, devm_nvmem_device_release,
devm_nvmem_device_match, nvmem);
WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
/**
* nvmem_device_put() - put an already-obtained nvmem device
*
* @nvmem: pointer to nvmem device that needs to be released.
*/
void nvmem_device_put(struct nvmem_device *nvmem)
{
__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
* devm_nvmem_device_get() - Get nvmem device of device from a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
* on success. The nvmem_device will be freed automatically once the
* device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
struct nvmem_device **ptr, *nvmem;
ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
nvmem = nvmem_device_get(dev, id);
if (!IS_ERR(nvmem)) {
*ptr = nvmem;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index)
{
struct nvmem_cell *cell;
const char *name = NULL;
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
if (!cell)
return ERR_PTR(-ENOMEM);
if (id) {
name = kstrdup_const(id, GFP_KERNEL);
if (!name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
}
cell->id = name;
cell->entry = entry;
cell->index = index;
return cell;
}
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
struct nvmem_cell_entry *cell_entry;
struct nvmem_cell *cell = ERR_PTR(-ENOENT);
struct nvmem_cell_lookup *lookup;
struct nvmem_device *nvmem;
const char *dev_id;
if (!dev)
return ERR_PTR(-EINVAL);
dev_id = dev_name(dev);
mutex_lock(&nvmem_lookup_mutex);
list_for_each_entry(lookup, &nvmem_lookup_list, node) {
if ((strcmp(lookup->dev_id, dev_id) == 0) &&
(strcmp(lookup->con_id, con_id) == 0)) {
/* This is the right entry. */
nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
device_match_name);
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
break;
}
cell_entry = nvmem_find_cell_entry_by_name(nvmem,
lookup->cell_name);
if (!cell_entry) {
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
} else {
cell = nvmem_create_cell(cell_entry, con_id, 0);
if (IS_ERR(cell))
__nvmem_device_put(nvmem);
}
break;
}
}
mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
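/*
 * Illustrative sketch (assumption): on non-DT systems the lookup list
 * walked above is typically filled by board or machine code through
 * nvmem_add_cell_lookups(). All names below are hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_cell_lookup = {
 *		.nvmem_name = "foo-eeprom0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-eth.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_cell_lookup, 1);
 */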
static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
if (nvmem->layout && nvmem->layout->dev.driver)
module_put(nvmem->layout->dev.driver->owner);
}
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
struct nvmem_cell_entry *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
list_for_each_entry(iter, &nvmem->cells, node) {
if (np == iter->np) {
cell = iter;
break;
}
}
mutex_unlock(&nvmem_mutex);
return cell;
}
static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
if (!nvmem->layout)
return 0;
if (!nvmem->layout->dev.driver ||
!try_module_get(nvmem->layout->dev.driver->owner))
return -EPROBE_DEFER;
return 0;
}
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
* @np: Device tree node that uses the nvmem cell.
* @id: nvmem cell name from nvmem-cell-names property, or NULL
* for the cell at index 0 (the lone cell with no accompanying
* nvmem-cell-names property).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell must be released with
* nvmem_cell_put().
*/
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
struct device_node *cell_np, *nvmem_np;
struct nvmem_device *nvmem;
struct nvmem_cell_entry *cell_entry;
struct nvmem_cell *cell;
struct of_phandle_args cell_spec;
int index = 0;
int cell_index = 0;
int ret;
/* if cell name exists, find index to the name */
if (id)
index = of_property_match_string(np, "nvmem-cell-names", id);
ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
"#nvmem-cell-cells",
index, &cell_spec);
if (ret)
return ERR_PTR(-ENOENT);
if (cell_spec.args_count > 1)
return ERR_PTR(-EINVAL);
cell_np = cell_spec.np;
if (cell_spec.args_count)
cell_index = cell_spec.args[0];
nvmem_np = of_get_parent(cell_np);
if (!nvmem_np) {
of_node_put(cell_np);
return ERR_PTR(-EINVAL);
}
/* nvmem layouts produce cells within the nvmem-layout container */
if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
nvmem_np = of_get_next_parent(nvmem_np);
if (!nvmem_np) {
of_node_put(cell_np);
return ERR_PTR(-EINVAL);
}
}
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
if (IS_ERR(nvmem)) {
of_node_put(cell_np);
return ERR_CAST(nvmem);
}
ret = nvmem_layout_module_get_optional(nvmem);
if (ret) {
of_node_put(cell_np);
__nvmem_device_put(nvmem);
return ERR_PTR(ret);
}
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
nvmem_layout_module_put(nvmem);
if (nvmem->layout)
return ERR_PTR(-EPROBE_DEFER);
else
return ERR_PTR(-ENOENT);
}
cell = nvmem_create_cell(cell_entry, id, cell_index);
if (IS_ERR(cell)) {
__nvmem_device_put(nvmem);
nvmem_layout_module_put(nvmem);
}
return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
/**
* nvmem_cell_get() - Get nvmem cell of device from a given cell name
*
* @dev: Device that requests the nvmem cell.
* @id: nvmem cell name to get (this corresponds with the name from the
* nvmem-cell-names property for DT systems and with the con_id from
* the lookup entry for non-DT systems).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell must be released with
* nvmem_cell_put().
*/
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell *cell;
if (dev->of_node) { /* try dt first */
cell = of_nvmem_cell_get(dev->of_node, id);
if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
return cell;
}
/* NULL cell id only allowed for device tree; invalid otherwise */
if (!id)
return ERR_PTR(-EINVAL);
return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
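/*
 * Illustrative sketch of typical consumer usage (assumption; the
 * "calibration" cell name is hypothetical): the buffer returned by
 * nvmem_cell_read() is allocated for the caller and must be kfree()d,
 * and the cell itself released with nvmem_cell_put().
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	void *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... use the len bytes at data, then kfree(data) ...
 */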
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
nvmem_cell_put(*(struct nvmem_cell **)res);
}
/**
* devm_nvmem_cell_get() - Get nvmem cell of device from a given id
*
* @dev: Device that requests the nvmem cell.
* @id: nvmem cell name id to get.
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed
* automatically once the device is freed.
*/
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell **ptr, *cell;
ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
cell = nvmem_cell_get(dev, id);
if (!IS_ERR(cell)) {
*ptr = cell;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
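/*
 * Illustrative sketch (assumption, hypothetical "calibration" name): with
 * the devm_ variant the cell reference is dropped automatically on driver
 * detach, so probe code only performs the read; an -EPROBE_DEFER from the
 * provider is simply returned to the driver core.
 *
 *	cell = devm_nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return dev_err_probe(dev, PTR_ERR(cell), "no calibration cell\n");
 *
 *	data = nvmem_cell_read(cell, &len);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 */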
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
struct nvmem_cell **c = res;
if (WARN_ON(!c || !*c))
return 0;
return *c == data;
}
/**
* devm_nvmem_cell_put() - Release previously allocated nvmem cell
* from devm_nvmem_cell_get.
*
* @dev: Device that requests the nvmem cell.
* @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
*/
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
int ret;
ret = devres_release(dev, devm_nvmem_cell_release,
devm_nvmem_cell_match, cell);
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
* nvmem_cell_put() - Release previously allocated nvmem cell.
*
* @cell: Previously allocated nvmem cell by nvmem_cell_get().
*/
void nvmem_cell_put(struct nvmem_cell *cell)
{
struct nvmem_device *nvmem = cell->entry->nvmem;
if (cell->id)
kfree_const(cell->id);
kfree(cell);
__nvmem_device_put(nvmem);
nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
u8 *p, *b;
int i, extra, bit_offset = cell->bit_offset;
p = b = buf;
if (bit_offset) {
/* First shift */
*b++ >>= bit_offset;
/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get bits from next byte and shift them towards msb */
*p |= *b << (BITS_PER_BYTE - bit_offset);
p = b;
*b++ >>= bit_offset;
}
} else {
/* point to the msb */
p += cell->bytes - 1;
}
/* result fits in fewer bytes */
extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
while (--extra >= 0)
*p-- = 0;
/* clear msb bits if any leftover in the last byte */
if (cell->nbits % BITS_PER_BYTE)
*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
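/*
 * Worked example (illustrative): a cell with bit_offset = 2 and nbits = 10
 * occupies cell->bytes = 2 raw bytes. For raw data {0xa7, 0x5e} (the
 * little-endian word 0x5ea7) the in-place shift above produces
 * buf[0] = (0xa7 >> 2) | (u8)(0x5e << 6) = 0xa9 and
 * buf[1] = (0x5e >> 2) & GENMASK(1, 0) = 0x03, i.e. the 10-bit field
 * 0x3a9 = (0x5ea7 >> 2) & GENMASK(9, 0).
 */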
static int __nvmem_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_entry *cell,
void *buf, size_t *len, const char *id, int index)
{
int rc;
rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
if (rc)
return rc;
/* shift bits in-place */
if (cell->bit_offset || cell->nbits)
nvmem_shift_read_buffer_in_place(cell, buf);
if (cell->read_post_process) {
rc = cell->read_post_process(cell->priv, id, index,
cell->offset, buf, cell->raw_len);
if (rc)
return rc;
}
if (len)
*len = cell->bytes;
return 0;
}
/**
* nvmem_cell_read() - Read a given nvmem cell
*
* @cell: nvmem cell to be read.
* @len: pointer to length of cell which will be populated on successful read;
* can be NULL.
*
* Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
* buffer should be freed by the consumer with a kfree().
*/
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
struct nvmem_cell_entry *entry = cell->entry;
struct nvmem_device *nvmem = entry->nvmem;
u8 *buf;
int rc;
if (!nvmem)
return ERR_PTR(-EINVAL);
buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
if (rc) {
kfree(buf);
return ERR_PTR(rc);
}
return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
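/*
* Illustrative consumer usage of nvmem_cell_read() (a hedged sketch, not
* taken from an in-tree driver): the "calibration" cell name and the caller's
* struct device are assumptions for the example. The returned buffer is
* allocated by nvmem_cell_read() and must be freed with kfree().
*
*	struct nvmem_cell *cell;
*	size_t len;
*	u8 *data;
*
*	cell = nvmem_cell_get(dev, "calibration");
*	if (IS_ERR(cell))
*		return PTR_ERR(cell);
*
*	data = nvmem_cell_read(cell, &len);
*	nvmem_cell_put(cell);
*	if (IS_ERR(data))
*		return PTR_ERR(data);
*
*	... use data[0..len - 1] ...
*	kfree(data);
*/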
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
u8 v, *p, *buf, *b, pbyte, pbits;
nbits = cell->nbits;
buf = kzalloc(cell->bytes, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
memcpy(buf, _buf, len);
p = b = buf;
if (bit_offset) {
pbyte = *b;
*b <<= bit_offset;
/* setup the first byte with lsb bits from nvmem */
rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
if (rc)
goto err;
*b++ |= GENMASK(bit_offset - 1, 0) & v;
/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get last byte bits and shift them towards lsb */
pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
pbyte = *b;
p = b;
*b <<= bit_offset;
*b++ |= pbits;
}
}
/* if it does not end on a byte boundary */
if ((nbits + bit_offset) % BITS_PER_BYTE) {
/* setup the last byte with msb bits from nvmem */
rc = nvmem_reg_read(nvmem,
cell->offset + cell->bytes - 1, &v, 1);
if (rc)
goto err;
*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
}
return buf;
err:
kfree(buf);
return ERR_PTR(rc);
}
static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
struct nvmem_device *nvmem = cell->nvmem;
int rc;
if (!nvmem || nvmem->read_only ||
(cell->bit_offset == 0 && len != cell->bytes))
return -EINVAL;
/*
* Any cells which have a read_post_process hook are read-only because
* we cannot reverse the operation and it might affect other cells,
* too.
*/
if (cell->read_post_process)
return -EINVAL;
if (cell->bit_offset || cell->nbits) {
buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
}
rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
/* free the tmp buffer */
if (cell->bit_offset || cell->nbits)
kfree(buf);
if (rc)
return rc;
return len;
}
/**
* nvmem_cell_write() - Write to a given nvmem cell
*
* @cell: nvmem cell to be written.
* @buf: Buffer to be written.
* @len: length of buffer to be written to nvmem cell.
*
* Return: length of bytes written or negative on failure.
*/
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
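/*
* Illustrative write sequence (a hedged sketch; the "mac-address" cell name
* and the 6-byte payload are assumptions). The buffer length must match the
* cell size unless the cell uses a bit_offset, the device must not be
* read-only, and cells with a read_post_process hook reject writes.
*
*	struct nvmem_cell *cell;
*	u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
*	int ret;
*
*	cell = nvmem_cell_get(dev, "mac-address");
*	if (IS_ERR(cell))
*		return PTR_ERR(cell);
*
*	ret = nvmem_cell_write(cell, mac, sizeof(mac));
*	nvmem_cell_put(cell);
*	if (ret != sizeof(mac))
*		return ret < 0 ? ret : -EIO;
*/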
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
void *val, size_t count)
{
struct nvmem_cell *cell;
void *buf;
size_t len;
cell = nvmem_cell_get(dev, cell_id);
if (IS_ERR(cell))
return PTR_ERR(cell);
buf = nvmem_cell_read(cell, &len);
if (IS_ERR(buf)) {
nvmem_cell_put(cell);
return PTR_ERR(buf);
}
if (len != count) {
kfree(buf);
nvmem_cell_put(cell);
return -EINVAL;
}
memcpy(val, buf, count);
kfree(buf);
nvmem_cell_put(cell);
return 0;
}
/**
* nvmem_cell_read_u8() - Read a cell value as a u8
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
/**
* nvmem_cell_read_u16() - Read a cell value as a u16
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
* nvmem_cell_read_u32() - Read a cell value as a u32
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
* nvmem_cell_read_u64() - Read a cell value as a u64
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
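/*
* The u8/u16/u32/u64 helpers above wrap the get/read/put sequence for cells
* whose size matches the requested type exactly. A minimal sketch (the
* "chip-revision" cell name is an assumption); -EINVAL is returned if the
* cell is not exactly sizeof(u32) bytes long.
*
*	u32 rev;
*	int ret;
*
*	ret = nvmem_cell_read_u32(dev, "chip-revision", &rev);
*	if (ret)
*		return ret;
*/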
static const void *nvmem_cell_read_variable_common(struct device *dev,
const char *cell_id,
size_t max_len, size_t *len)
{
struct nvmem_cell *cell;
int nbits;
void *buf;
cell = nvmem_cell_get(dev, cell_id);
if (IS_ERR(cell))
return cell;
nbits = cell->entry->nbits;
buf = nvmem_cell_read(cell, len);
nvmem_cell_put(cell);
if (IS_ERR(buf))
return buf;
/*
* If nbits is set then nvmem_cell_read() can significantly exaggerate
* the length of the real data. Throw away the extra junk.
*/
if (nbits)
*len = DIV_ROUND_UP(nbits, 8);
if (*len > max_len) {
kfree(buf);
return ERR_PTR(-ERANGE);
}
return buf;
}
/**
* nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
u32 *val)
{
size_t len;
const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0;
for (i = 0; i < len; i++)
*val |= buf[i] << (8 * i);
kfree(buf);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
/**
* nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
* @val: pointer to output value.
*
* Return: 0 on success or negative errno.
*/
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
u64 *val)
{
size_t len;
const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* Copy w/ implicit endian conversion */
*val = 0;
for (i = 0; i < len; i++)
*val |= (uint64_t)buf[i] << (8 * i);
kfree(buf);
return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
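/*
* Unlike the fixed-width helpers, the variable-length readers accept any
* cell that fits in the requested type and assemble the value byte by byte
* in little-endian order. A hedged sketch (the "max-freq-hz" cell name is an
* assumption); a 16-bit cell is accepted here, whereas
* nvmem_cell_read_u32() would reject it with -EINVAL.
*
*	u32 max_freq;
*	int ret;
*
*	ret = nvmem_cell_read_variable_le_u32(dev, "max-freq-hz", &max_freq);
*	if (ret)
*		return ret;
*/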
/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
* @nvmem: nvmem device to read from.
* @info: nvmem cell info to be read.
* @buf: buffer pointer which will be populated on successful read.
*
* Return: length of successful bytes read on success and negative
* error code on error.
*/
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
struct nvmem_cell_entry cell;
int rc;
ssize_t len;
if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
if (rc)
return rc;
rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
if (rc)
return rc;
return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
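/*
* Device-based cell access bypasses the cell lookup tables and describes the
* cell ad hoc. A hedged sketch (the name, offset and size values are
* assumptions, and "nvmem" is a handle obtained earlier, e.g. via
* nvmem_device_get()):
*
*	struct nvmem_cell_info info = {
*		.name	= "serial",
*		.offset	= 0x40,
*		.bytes	= 8,
*	};
*	u8 serial[8];
*	ssize_t ret;
*
*	ret = nvmem_device_cell_read(nvmem, &info, serial);
*	if (ret < 0)
*		return ret;
*/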
/**
* nvmem_device_cell_write() - Write cell to a given nvmem device
*
* @nvmem: nvmem device to be written to.
* @info: nvmem cell info to be written.
* @buf: buffer to be written to cell.
*
* Return: length of bytes written or negative error code on failure.
*/
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
struct nvmem_cell_entry cell;
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
if (rc)
return rc;
return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
/**
* nvmem_device_read() - Read from a given nvmem device
*
* @nvmem: nvmem device to read from.
* @offset: offset in nvmem device.
* @bytes: number of bytes to read.
* @buf: buffer pointer which will be populated on successful read.
*
* Return: length of successful bytes read on success and negative
* error code on error.
*/
int nvmem_device_read(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
{
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_reg_read(nvmem, offset, buf, bytes);
if (rc)
return rc;
return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
/**
* nvmem_device_write() - Write to a given nvmem device
*
* @nvmem: nvmem device to be written to.
* @offset: offset in nvmem device.
* @bytes: number of bytes to write.
* @buf: buffer to be written.
*
* Return: length of bytes written or negative error code on failure.
*/
int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
{
int rc;
if (!nvmem)
return -EINVAL;
rc = nvmem_reg_write(nvmem, offset, buf, bytes);
if (rc)
return rc;
return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
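/*
* Raw device access reads or writes a byte range directly, without any cell
* description. A hedged sketch (the offset and length are assumptions;
* "nvmem" is a previously acquired handle). Both helpers return the number
* of bytes transferred or a negative errno.
*
*	u8 blob[16];
*	int ret;
*
*	ret = nvmem_device_read(nvmem, 0x100, sizeof(blob), blob);
*	if (ret < 0)
*		return ret;
*
*	ret = nvmem_device_write(nvmem, 0x100, sizeof(blob), blob);
*	if (ret < 0)
*		return ret;
*/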
/**
* nvmem_add_cell_table() - register a table of cell info entries
*
* @table: table of cell info entries
*/
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
mutex_lock(&nvmem_cell_mutex);
list_add_tail(&table->node, &nvmem_cell_tables);
mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
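/*
* A provider or board code can describe cells for a named nvmem device by
* registering a static table. A hedged sketch (the device name, cell names,
* offsets and sizes are assumptions); the table must remain allocated until
* nvmem_del_cell_table() is called.
*
*	static struct nvmem_cell_info board_cells[] = {
*		{ .name = "mac-address", .offset = 0x00, .bytes = 6 },
*		{ .name = "board-id",    .offset = 0x10, .bytes = 4 },
*	};
*
*	static struct nvmem_cell_table board_cell_table = {
*		.nvmem_name	= "board-eeprom",
*		.cells		= board_cells,
*		.ncells		= ARRAY_SIZE(board_cells),
*	};
*
*	nvmem_add_cell_table(&board_cell_table);
*/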
/**
* nvmem_del_cell_table() - remove a previously registered cell info table
*
* @table: table of cell info entries
*/
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
mutex_lock(&nvmem_cell_mutex);
list_del(&table->node);
mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
/**
* nvmem_add_cell_lookups() - register a list of cell lookup entries
*
* @entries: array of cell lookup entries
* @nentries: number of cell lookup entries in the array
*/
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
int i;
mutex_lock(&nvmem_lookup_mutex);
for (i = 0; i < nentries; i++)
list_add_tail(&entries[i].node, &nvmem_lookup_list);
mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
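/*
* Lookup entries connect a cell on a named nvmem device to a consumer device
* and connection id, so that nvmem_cell_get(dev, "mac-address") can succeed
* on non-DT platforms. A hedged sketch (all names are assumptions):
*
*	static struct nvmem_cell_lookup board_cell_lookups[] = {
*		{
*			.nvmem_name	= "board-eeprom",
*			.cell_name	= "mac-address",
*			.dev_id		= "fec1",
*			.con_id		= "mac-address",
*		},
*	};
*
*	nvmem_add_cell_lookups(board_cell_lookups,
*			       ARRAY_SIZE(board_cell_lookups));
*/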
/**
* nvmem_del_cell_lookups() - remove a list of previously added cell lookup
* entries
*
* @entries: array of cell lookup entries
* @nentries: number of cell lookup entries in the array
*/
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
int i;
mutex_lock(&nvmem_lookup_mutex);
for (i = 0; i < nentries; i++)
list_del(&entries[i].node);
mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
/**
* nvmem_dev_name() - Get the name of a given nvmem device.
*
* @nvmem: nvmem device.
*
* Return: name of the nvmem device.
*/
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
/**
* nvmem_dev_size() - Get the size of a given nvmem device.
*
* @nvmem: nvmem device.
*
* Return: size of the nvmem device.
*/
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
static int __init nvmem_init(void)
{
int ret;
ret = bus_register(&nvmem_bus_type);
if (ret)
return ret;
ret = nvmem_layout_bus_register();
if (ret)
bus_unregister(&nvmem_bus_type);
return ret;
}
static void __exit nvmem_exit(void)
{
nvmem_layout_bus_unregister();
bus_unregister(&nvmem_bus_type);
}
subsys_initcall(nvmem_init);
module_exit(nvmem_exit);
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
MODULE_DESCRIPTION("nvmem Driver Core");