iio: core: move iio_dev's buffer_list to the private iio device object

This change moves 'buffer_list' out of the public IIO device object
(struct iio_dev) and into the private part (struct iio_dev_opaque), so
that the list of attached buffers is visible only to the IIO core.

Signed-off-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Alexandru Ardelean authored 2020-06-30 07:57:07 +03:00, committed by Jonathan Cameron
commit 6a8c6b26f7, parent 207c2d27a0
4 changed files with 27 additions and 17 deletions
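
For readers unfamiliar with the pattern the commit relies on: the public
struct iio_dev is embedded as the first member of a private wrapper, and
the core recovers the wrapper from a public pointer with a container_of()
style cast. A minimal standalone sketch (simplified stand-in types, not
the kernel's actual definitions):

    #include <stddef.h>

    /* Simplified stand-ins for the kernel types. */
    struct list_head { struct list_head *next, *prev; };
    struct iio_dev { int id; /* ... public, driver-visible fields ... */ };

    /* Private part: embeds the public struct and hides core-only state. */
    struct iio_dev_opaque {
            struct iio_dev indio_dev;     /* public part, handed to drivers */
            struct list_head buffer_list; /* core-private after this commit */
    };

    /* container_of() in miniature: recover the wrapper from the public pointer. */
    static struct iio_dev_opaque *to_iio_dev_opaque(struct iio_dev *indio_dev)
    {
            return (struct iio_dev_opaque *)((char *)indio_dev -
                    offsetof(struct iio_dev_opaque, indio_dev));
    }

Drivers only ever hold a struct iio_dev *; code that cannot name
struct iio_dev_opaque (i.e. everything outside the IIO core) can no
longer touch buffer_list at all.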

diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c

@@ -19,6 +19,7 @@
 #include <linux/sched/signal.h>
 
 #include <linux/iio/iio.h>
+#include <linux/iio/iio-opaque.h>
 #include "iio_core.h"
 #include "iio_core_trigger.h"
 #include <linux/iio/sysfs.h>
@@ -599,8 +600,10 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
 static void iio_buffer_activate(struct iio_dev *indio_dev,
                                 struct iio_buffer *buffer)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
         iio_buffer_get(buffer);
-        list_add(&buffer->buffer_list, &indio_dev->buffer_list);
+        list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
 }
 
 static void iio_buffer_deactivate(struct iio_buffer *buffer)
@@ -612,10 +615,11 @@ static void iio_buffer_deactivate(struct iio_buffer *buffer)
 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_buffer *buffer, *_buffer;
 
         list_for_each_entry_safe(buffer, _buffer,
-                                 &indio_dev->buffer_list, buffer_list)
+                                 &iio_dev_opaque->buffer_list, buffer_list)
                 iio_buffer_deactivate(buffer);
 }
@@ -688,6 +692,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
                 struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
                 struct iio_device_config *config)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         unsigned long *compound_mask;
         const unsigned long *scan_mask;
         bool strict_scanmask = false;
@@ -710,12 +715,12 @@ static int iio_verify_update(struct iio_dev *indio_dev,
          * to verify.
          */
         if (remove_buffer && !insert_buffer &&
-            list_is_singular(&indio_dev->buffer_list))
+            list_is_singular(&iio_dev_opaque->buffer_list))
                 return 0;
 
         modes = indio_dev->modes;
 
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                 if (buffer == remove_buffer)
                         continue;
                 modes &= buffer->access->modes;
@@ -736,7 +741,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
                  * Keep things simple for now and only allow a single buffer to
                  * be connected in hardware mode.
                  */
-                if (insert_buffer && !list_empty(&indio_dev->buffer_list))
+                if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
                         return -EINVAL;
                 config->mode = INDIO_BUFFER_HARDWARE;
                 strict_scanmask = true;
@@ -756,7 +761,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
         scan_timestamp = false;
 
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                 if (buffer == remove_buffer)
                         continue;
                 bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
@@ -902,10 +907,11 @@ error_clear_mux_table:
 static int iio_update_demux(struct iio_dev *indio_dev)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_buffer *buffer;
         int ret;
 
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                 ret = iio_buffer_update_demux(indio_dev, buffer);
                 if (ret < 0)
                         goto error_clear_mux_table;
@@ -913,7 +919,7 @@ static int iio_update_demux(struct iio_dev *indio_dev)
         return 0;
 
 error_clear_mux_table:
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
                 iio_buffer_demux_free(buffer);
 
         return ret;
@@ -922,6 +928,7 @@ error_clear_mux_table:
 static int iio_enable_buffers(struct iio_dev *indio_dev,
         struct iio_device_config *config)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_buffer *buffer;
         int ret;
@@ -958,7 +965,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
                 indio_dev->info->hwfifo_set_watermark(indio_dev,
                         config->watermark);
 
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                 ret = iio_buffer_enable(buffer, indio_dev);
                 if (ret)
                         goto err_disable_buffers;
@@ -983,7 +990,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
         return 0;
 
 err_disable_buffers:
-        list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
+        list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
                                              buffer_list)
                 iio_buffer_disable(buffer, indio_dev);
 err_run_postdisable:
@@ -998,12 +1005,13 @@ err_undo_config:
 static int iio_disable_buffers(struct iio_dev *indio_dev)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_buffer *buffer;
         int ret = 0;
         int ret2;
 
         /* Wind down existing buffers - iff there are any */
-        if (list_empty(&indio_dev->buffer_list))
+        if (list_empty(&iio_dev_opaque->buffer_list))
                 return 0;
 
         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
@@ -1024,7 +1032,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
                 ret = ret2;
         }
 
-        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                 ret2 = iio_buffer_disable(buffer, indio_dev);
                 if (ret2 && !ret)
                         ret = ret2;
@@ -1047,6 +1055,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
                 struct iio_buffer *insert_buffer,
                 struct iio_buffer *remove_buffer)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         struct iio_device_config new_config;
         int ret;
@@ -1071,7 +1080,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
         iio_buffer_activate(indio_dev, insert_buffer);
 
         /* If no buffers in list, we are done */
-        if (list_empty(&indio_dev->buffer_list))
+        if (list_empty(&iio_dev_opaque->buffer_list))
                 return 0;
 
         ret = iio_enable_buffers(indio_dev, &new_config);
@@ -1420,10 +1429,11 @@ static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
  */
 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
 {
+        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
         int ret;
         struct iio_buffer *buf;
 
-        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+        list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
                 ret = iio_push_to_buffer(buf, data);
                 if (ret < 0)
                         return ret;

diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c

@@ -1559,7 +1559,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
                 return NULL;
         }
         dev_set_name(&dev->dev, "iio:device%d", dev->id);
-        INIT_LIST_HEAD(&dev->buffer_list);
+        INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
 
         return dev;
 }
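
A note on why INIT_LIST_HEAD() stays at allocation time: an empty kernel
list is a head that points at itself, and list_empty() tests exactly that,
so the head must be initialized before iio_disable_buffers() or
__iio_update_buffers() ever check it. A minimal sketch of the
<linux/list.h> semantics being relied on (simplified, not the kernel
source):

    struct list_head { struct list_head *next, *prev; };

    /* An empty list is a head whose next/prev point back at itself. */
    static void INIT_LIST_HEAD(struct list_head *head)
    {
            head->next = head;
            head->prev = head;
    }

    /* list_empty() is just that self-reference check. */
    static int list_empty(const struct list_head *head)
    {
            return head->next == head;
    }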

diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h

@@ -6,6 +6,7 @@
 /**
  * struct iio_dev_opaque - industrial I/O device opaque information
  * @indio_dev:                  public industrial I/O device information
+ * @buffer_list:                list of all buffers currently attached
  * @channel_attr_list:          keep track of automatically created channel
  *                              attributes
  * @chan_attr_group:            group for all attrs in base directory
@@ -16,6 +17,7 @@
  */
 struct iio_dev_opaque {
         struct iio_dev                  indio_dev;
+        struct list_head                buffer_list;
         struct list_head                channel_attr_list;
         struct attribute_group          chan_attr_group;
 #if defined(CONFIG_DEBUG_FS)

diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h

@@ -490,7 +490,6 @@ struct iio_buffer_setup_ops {
  *                      and owner
  * @event_interface:    [INTERN] event chrdevs associated with interrupt lines
  * @buffer:             [DRIVER] any buffer present
- * @buffer_list:        [INTERN] list of all buffers currently attached
  * @scan_bytes:         [INTERN] num bytes captured to be fed to buffer demux
  * @mlock:              [INTERN] lock used to prevent simultaneous device state
  *                      changes
@@ -531,7 +530,6 @@ struct iio_dev {
         struct iio_event_interface      *event_interface;
         struct iio_buffer               *buffer;
-        struct list_head                buffer_list;
         int                             scan_bytes;
         struct mutex                    mlock;
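
Net effect for drivers: nothing changes on their side. A hypothetical
trigger handler (example_trigger_handler and its sample data are made up
for illustration) keeps using the public API, and the core walks its
now-private buffer_list internally:

    #include <linux/irqreturn.h>
    #include <linux/iio/iio.h>
    #include <linux/iio/buffer.h>
    #include <linux/iio/trigger_consumer.h>

    static irqreturn_t example_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;
            u16 samples[4] = { 0 };        /* placeholder scan data */

            /* Public entry point; internally iterates the core-private buffer_list. */
            iio_push_to_buffers(indio_dev, samples);

            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }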