/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
|
|
|
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/i2c.h>
|
|
|
|
#include <linux/list.h>
|
[media] v4l2-core: Use kvmalloc() for potentially big allocations
There are multiple places where arrays or otherwise variable sized
buffer are allocated through V4L2 core code, including things like
controls, memory pages, staging buffers for ioctls and so on. Such
allocations can potentially require an order > 0 allocation from the
page allocator, which is not guaranteed to be fulfilled and is likely to
fail on a system with severe memory fragmentation (e.g. a system with
very long uptime).
Since the memory being allocated is intended to be used by the CPU
exclusively, we can consider using vmalloc() as a fallback and this is
exactly what the recently merged kvmalloc() helpers do. A kmalloc() call
is still attempted, even for order > 0 allocations, but it is done
with __GFP_NORETRY and __GFP_NOWARN, with expectation of failing if
requested memory is not available instantly. Only then the vmalloc()
fallback is used. This should give us fast and more reliable allocations
even on systems with higher memory pressure and/or more fragmentation,
while still retaining the same performance level on systems not
suffering from such conditions.
While at it, replace explicit array size calculations on changed
allocations with kvmalloc_array().
Purposedly not touching videobuf1, as it is deprecated, has only few
users remaining and would rather be seen removed instead.
Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2017-06-19 03:53:43 +00:00
|
|
|
#include <linux/mm.h>
|
2013-01-08 10:06:31 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/mutex.h>
|
2016-08-16 09:54:59 +00:00
|
|
|
#include <linux/of.h>
|
2013-01-08 10:06:31 +00:00
|
|
|
#include <linux/platform_device.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
|
|
#include <media/v4l2-async.h>
|
|
|
|
#include <media/v4l2-device.h>
|
|
|
|
#include <media/v4l2-subdev.h>
|
|
|
|
|
2015-06-11 19:18:01 +00:00
|
|
|
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
|
2013-01-08 10:06:31 +00:00
|
|
|
{
|
2013-06-24 08:13:51 +00:00
|
|
|
#if IS_ENABLED(CONFIG_I2C)
|
2015-06-11 19:18:01 +00:00
|
|
|
struct i2c_client *client = i2c_verify_client(sd->dev);
|
2013-01-08 10:06:31 +00:00
|
|
|
return client &&
|
|
|
|
asd->match.i2c.adapter_id == client->adapter->nr &&
|
|
|
|
asd->match.i2c.address == client->addr;
|
2013-06-24 08:13:51 +00:00
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif
|
2013-01-08 10:06:31 +00:00
|
|
|
}
|
|
|
|
|
2015-06-11 19:18:01 +00:00
|
|
|
static bool match_devname(struct v4l2_subdev *sd,
|
|
|
|
struct v4l2_async_subdev *asd)
|
2013-01-08 10:06:31 +00:00
|
|
|
{
|
2015-06-11 19:18:01 +00:00
|
|
|
return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
|
2013-01-08 10:06:31 +00:00
|
|
|
}
|
|
|
|
|
2016-08-16 09:54:59 +00:00
|
|
|
static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
|
|
|
|
{
|
2017-07-20 22:06:22 +00:00
|
|
|
return sd->fwnode == asd->match.fwnode.fwnode;
|
2016-08-16 09:54:59 +00:00
|
|
|
}
|
|
|
|
|
2015-06-11 19:18:01 +00:00
|
|
|
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
|
|
|
|
{
|
|
|
|
if (!asd->match.custom.match)
|
|
|
|
/* Match always */
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return asd->match.custom.match(sd->dev, asd);
|
2013-07-19 15:21:29 +00:00
|
|
|
}
|
|
|
|
|
2013-01-08 10:06:31 +00:00
|
|
|
/* Subdevices registered but not (yet) claimed by any notifier. */
static LIST_HEAD(subdev_list);
/* All registered notifiers, including already-completed ones. */
static LIST_HEAD(notifier_list);
/* Serializes access to the two lists above and to the notifiers'
 * waiting/done lists. */
static DEFINE_MUTEX(list_lock);
|
|
|
|
|
|
|
|
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
|
2013-07-22 11:01:33 +00:00
|
|
|
struct v4l2_subdev *sd)
|
2013-01-08 10:06:31 +00:00
|
|
|
{
|
2015-06-11 19:18:01 +00:00
|
|
|
bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
|
2013-01-08 10:06:31 +00:00
|
|
|
struct v4l2_async_subdev *asd;
|
|
|
|
|
|
|
|
list_for_each_entry(asd, ¬ifier->waiting, list) {
|
|
|
|
/* bus_type has been verified valid before */
|
2013-07-19 15:14:46 +00:00
|
|
|
switch (asd->match_type) {
|
|
|
|
case V4L2_ASYNC_MATCH_CUSTOM:
|
2015-06-11 19:18:01 +00:00
|
|
|
match = match_custom;
|
2013-01-08 10:06:31 +00:00
|
|
|
break;
|
2013-07-19 15:14:46 +00:00
|
|
|
case V4L2_ASYNC_MATCH_DEVNAME:
|
|
|
|
match = match_devname;
|
2013-01-08 10:06:31 +00:00
|
|
|
break;
|
2013-07-19 15:14:46 +00:00
|
|
|
case V4L2_ASYNC_MATCH_I2C:
|
2013-01-08 10:06:31 +00:00
|
|
|
match = match_i2c;
|
|
|
|
break;
|
2016-08-16 09:54:59 +00:00
|
|
|
case V4L2_ASYNC_MATCH_FWNODE:
|
|
|
|
match = match_fwnode;
|
|
|
|
break;
|
2013-01-08 10:06:31 +00:00
|
|
|
default:
|
|
|
|
/* Cannot happen, unless someone breaks us */
|
|
|
|
WARN_ON(true);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* match cannot be NULL here */
|
2015-06-11 19:18:01 +00:00
|
|
|
if (match(sd, asd))
|
2013-01-08 10:06:31 +00:00
|
|
|
return asd;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Bind a matched subdevice @sd to @notifier: run the notifier's bound()
 * callback, register the subdev with the notifier's v4l2_device and move
 * the subdev from the global waiting/subdev lists to the notifier's done
 * list.
 *
 * Returns 0 on success, a negative error code on failure (in which case
 * no list membership is changed), or the result of the notifier's
 * complete() callback when the last awaited subdevice has arrived.
 */
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		/* Undo the bound() callback before reporting the failure. */
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
/*
 * Unregister @sd from its v4l2_device and reset its async bookkeeping
 * (list membership, asd and dev pointers) so the subdevice driver can
 * reprobe and register the subdev again later.
 */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}
|
|
|
|
|
|
|
|
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
2013-07-22 11:01:33 +00:00
|
|
|
struct v4l2_subdev *sd, *tmp;
|
2013-01-08 10:06:31 +00:00
|
|
|
struct v4l2_async_subdev *asd;
|
|
|
|
int i;
|
|
|
|
|
2017-06-13 14:30:35 +00:00
|
|
|
if (!v4l2_dev || !notifier->num_subdevs ||
|
|
|
|
notifier->num_subdevs > V4L2_MAX_SUBDEVS)
|
2013-01-08 10:06:31 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
notifier->v4l2_dev = v4l2_dev;
|
|
|
|
INIT_LIST_HEAD(¬ifier->waiting);
|
|
|
|
INIT_LIST_HEAD(¬ifier->done);
|
|
|
|
|
|
|
|
for (i = 0; i < notifier->num_subdevs; i++) {
|
2013-07-19 15:31:10 +00:00
|
|
|
asd = notifier->subdevs[i];
|
2013-01-08 10:06:31 +00:00
|
|
|
|
2013-07-19 15:14:46 +00:00
|
|
|
switch (asd->match_type) {
|
|
|
|
case V4L2_ASYNC_MATCH_CUSTOM:
|
|
|
|
case V4L2_ASYNC_MATCH_DEVNAME:
|
|
|
|
case V4L2_ASYNC_MATCH_I2C:
|
2016-08-16 09:54:59 +00:00
|
|
|
case V4L2_ASYNC_MATCH_FWNODE:
|
2013-01-08 10:06:31 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
|
2013-07-19 15:14:46 +00:00
|
|
|
"Invalid match type %u on %p\n",
|
|
|
|
asd->match_type, asd);
|
2013-01-08 10:06:31 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
list_add_tail(&asd->list, ¬ifier->waiting);
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&list_lock);
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
|
2013-01-08 10:06:31 +00:00
|
|
|
int ret;
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
asd = v4l2_async_belongs(notifier, sd);
|
2013-01-08 10:06:31 +00:00
|
|
|
if (!asd)
|
|
|
|
continue;
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
ret = v4l2_async_test_notify(notifier, sd, asd);
|
2013-01-08 10:06:31 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[media] v4l2-async: failing functions shouldn't have side effects
v4l2-async had several functions doing some operations and then
not undoing the operations in a failure situation. For example,
v4l2_async_test_notify() moved a subdev into notifier's done list
even if registering the subdev (v4l2_device_register_subdev) failed.
If the subdev was allocated and v4l2_async_register_subdev() called
from the driver's probe() function, as usually, the probe()
function freed the allocated subdev and returned a failure.
Nevertheless, the subdev was still left into the notifier's done
list, causing an access to already freed memory when the notifier
was later unregistered.
A hand-edited call trace leaving freed subdevs into the notifier:
v4l2_async_register_notifier(notifier, asd)
cameradrv_probe
sd = devm_kzalloc()
v4l2_async_register_subdev(sd)
v4l2_async_test_notify(notifier, sd, asd)
list_move(sd, ¬ifier->done)
v4l2_device_register_subdev(notifier->v4l2_dev, sd)
cameradrv_registered(sd) -> fails
->v4l2_async_register_subdev returns failure
->cameradrv_probe returns failure
->devres frees the allocated sd
->sd was freed but it still remains in the notifier's list.
This patch fixes this and several other cases where a failing
function could leave nodes into a linked list while the caller
might free the node due to a failure.
Signed-off-by: Tuukka Toivonen <tuukka.toivonen@intel.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2017-01-27 10:32:56 +00:00
|
|
|
/* Keep also completed notifiers on the list */
|
|
|
|
list_add(¬ifier->list, ¬ifier_list);
|
|
|
|
|
2013-01-08 10:06:31 +00:00
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_notifier_register);
|
|
|
|
|
|
|
|
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
2013-07-22 11:01:33 +00:00
|
|
|
struct v4l2_subdev *sd, *tmp;
|
2013-01-08 10:06:31 +00:00
|
|
|
unsigned int notif_n_subdev = notifier->num_subdevs;
|
|
|
|
unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
|
2013-11-02 09:20:16 +00:00
|
|
|
struct device **dev;
|
2013-01-08 10:06:31 +00:00
|
|
|
int i = 0;
|
|
|
|
|
2013-07-03 10:49:06 +00:00
|
|
|
if (!notifier->v4l2_dev)
|
|
|
|
return;
|
|
|
|
|
[media] v4l2-core: Use kvmalloc() for potentially big allocations
There are multiple places where arrays or otherwise variable sized
buffer are allocated through V4L2 core code, including things like
controls, memory pages, staging buffers for ioctls and so on. Such
allocations can potentially require an order > 0 allocation from the
page allocator, which is not guaranteed to be fulfilled and is likely to
fail on a system with severe memory fragmentation (e.g. a system with
very long uptime).
Since the memory being allocated is intended to be used by the CPU
exclusively, we can consider using vmalloc() as a fallback and this is
exactly what the recently merged kvmalloc() helpers do. A kmalloc() call
is still attempted, even for order > 0 allocations, but it is done
with __GFP_NORETRY and __GFP_NOWARN, with expectation of failing if
requested memory is not available instantly. Only then the vmalloc()
fallback is used. This should give us fast and more reliable allocations
even on systems with higher memory pressure and/or more fragmentation,
while still retaining the same performance level on systems not
suffering from such conditions.
While at it, replace explicit array size calculations on changed
allocations with kvmalloc_array().
Purposedly not touching videobuf1, as it is deprecated, has only few
users remaining and would rather be seen removed instead.
Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2017-06-19 03:53:43 +00:00
|
|
|
dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
|
2013-11-02 09:20:16 +00:00
|
|
|
if (!dev) {
|
|
|
|
dev_err(notifier->v4l2_dev->dev,
|
|
|
|
"Failed to allocate device cache!\n");
|
|
|
|
}
|
|
|
|
|
2013-01-08 10:06:31 +00:00
|
|
|
mutex_lock(&list_lock);
|
|
|
|
|
|
|
|
list_del(¬ifier->list);
|
|
|
|
|
2013-07-31 16:10:18 +00:00
|
|
|
list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) {
|
2013-11-02 09:20:16 +00:00
|
|
|
struct device *d;
|
|
|
|
|
|
|
|
d = get_device(sd->dev);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
v4l2_async_cleanup(sd);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
/* If we handled USB devices, we'd have to lock the parent too */
|
2013-11-02 09:20:16 +00:00
|
|
|
device_release_driver(d);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
if (notifier->unbind)
|
2013-07-22 11:01:33 +00:00
|
|
|
notifier->unbind(notifier, sd, sd->asd);
|
2013-11-02 09:20:16 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Store device at the device cache, in order to call
|
|
|
|
* put_device() on the final step
|
|
|
|
*/
|
|
|
|
if (dev)
|
|
|
|
dev[i++] = d;
|
|
|
|
else
|
|
|
|
put_device(d);
|
2013-01-08 10:06:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
|
2013-11-02 09:20:16 +00:00
|
|
|
/*
|
|
|
|
* Call device_attach() to reprobe devices
|
|
|
|
*
|
|
|
|
* NOTE: If dev allocation fails, i is 0, and the whole loop won't be
|
|
|
|
* executed.
|
|
|
|
*/
|
2013-01-08 10:06:31 +00:00
|
|
|
while (i--) {
|
|
|
|
struct device *d = dev[i];
|
|
|
|
|
|
|
|
if (d && device_attach(d) < 0) {
|
|
|
|
const char *name = "(none)";
|
|
|
|
int lock = device_trylock(d);
|
|
|
|
|
|
|
|
if (lock && d->driver)
|
|
|
|
name = d->driver->name;
|
|
|
|
dev_err(d, "Failed to re-probe to %s\n", name);
|
|
|
|
if (lock)
|
|
|
|
device_unlock(d);
|
|
|
|
}
|
|
|
|
put_device(d);
|
|
|
|
}
|
[media] v4l2-core: Use kvmalloc() for potentially big allocations
There are multiple places where arrays or otherwise variable sized
buffer are allocated through V4L2 core code, including things like
controls, memory pages, staging buffers for ioctls and so on. Such
allocations can potentially require an order > 0 allocation from the
page allocator, which is not guaranteed to be fulfilled and is likely to
fail on a system with severe memory fragmentation (e.g. a system with
very long uptime).
Since the memory being allocated is intended to be used by the CPU
exclusively, we can consider using vmalloc() as a fallback and this is
exactly what the recently merged kvmalloc() helpers do. A kmalloc() call
is still attempted, even for order > 0 allocations, but it is done
with __GFP_NORETRY and __GFP_NOWARN, with expectation of failing if
requested memory is not available instantly. Only then the vmalloc()
fallback is used. This should give us fast and more reliable allocations
even on systems with higher memory pressure and/or more fragmentation,
while still retaining the same performance level on systems not
suffering from such conditions.
While at it, replace explicit array size calculations on changed
allocations with kvmalloc_array().
Purposedly not touching videobuf1, as it is deprecated, has only few
users remaining and would rather be seen removed instead.
Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2017-06-19 03:53:43 +00:00
|
|
|
kvfree(dev);
|
2013-07-03 10:49:06 +00:00
|
|
|
|
|
|
|
notifier->v4l2_dev = NULL;
|
|
|
|
|
2013-01-08 10:06:31 +00:00
|
|
|
/*
|
|
|
|
* Don't care about the waiting list, it is initialised and populated
|
|
|
|
* upon notifier registration.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_notifier_unregister);
|
|
|
|
|
|
|
|
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
|
|
|
|
{
|
|
|
|
struct v4l2_async_notifier *notifier;
|
|
|
|
|
2015-06-11 19:18:01 +00:00
|
|
|
/*
|
|
|
|
* No reference taken. The reference is held by the device
|
|
|
|
* (struct v4l2_subdev.dev), and async sub-device does not
|
|
|
|
* exist independently of the device at any point of time.
|
|
|
|
*/
|
2016-08-26 23:17:25 +00:00
|
|
|
if (!sd->fwnode && sd->dev)
|
|
|
|
sd->fwnode = dev_fwnode(sd->dev);
|
2015-06-11 19:18:01 +00:00
|
|
|
|
2013-01-08 10:06:31 +00:00
|
|
|
mutex_lock(&list_lock);
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
INIT_LIST_HEAD(&sd->async_list);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
list_for_each_entry(notifier, ¬ifier_list, list) {
|
2013-07-22 11:01:33 +00:00
|
|
|
struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
|
2013-01-08 10:06:31 +00:00
|
|
|
if (asd) {
|
2013-07-22 11:01:33 +00:00
|
|
|
int ret = v4l2_async_test_notify(notifier, sd, asd);
|
2013-01-08 10:06:31 +00:00
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* None matched, wait for hot-plugging */
|
2013-07-22 11:01:33 +00:00
|
|
|
list_add(&sd->async_list, &subdev_list);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_register_subdev);
|
|
|
|
|
|
|
|
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
|
|
|
|
{
|
2013-07-22 11:01:33 +00:00
|
|
|
struct v4l2_async_notifier *notifier = sd->notifier;
|
2013-01-08 10:06:31 +00:00
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
if (!sd->asd) {
|
|
|
|
if (!list_empty(&sd->async_list))
|
|
|
|
v4l2_async_cleanup(sd);
|
2013-01-08 10:06:31 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&list_lock);
|
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
list_add(&sd->asd->list, ¬ifier->waiting);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
2013-07-22 11:01:33 +00:00
|
|
|
v4l2_async_cleanup(sd);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
if (notifier->unbind)
|
2013-07-22 11:01:33 +00:00
|
|
|
notifier->unbind(notifier, sd, sd->asd);
|
2013-01-08 10:06:31 +00:00
|
|
|
|
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
|