2017-11-07 16:30:07 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* platform.c - platform 'pseudo' bus for legacy devices
|
|
|
|
*
|
|
|
|
* Copyright (c) 2002-3 Patrick Mochel
|
|
|
|
* Copyright (c) 2002-3 Open Source Development Labs
|
|
|
|
*
|
2019-06-18 15:34:59 +00:00
|
|
|
* Please see Documentation/driver-api/driver-model/platform.rst for more
|
2005-04-16 22:20:36 +00:00
|
|
|
* information.
|
|
|
|
*/
|
|
|
|
|
2009-08-06 23:00:44 +00:00
|
|
|
#include <linux/string.h>
|
2005-10-29 18:07:23 +00:00
|
|
|
#include <linux/platform_device.h>
|
2010-06-08 13:48:20 +00:00
|
|
|
#include <linux/of_device.h>
|
2014-04-23 22:57:41 +00:00
|
|
|
#include <linux/of_irq.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
2020-12-02 10:36:56 +00:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/ioport.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/dma-mapping.h>
|
2018-10-30 22:09:49 +00:00
|
|
|
#include <linux/memblock.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/err.h>
|
2005-10-30 23:03:48 +00:00
|
|
|
#include <linux/slab.h>
|
2009-08-20 18:25:32 +00:00
|
|
|
#include <linux/pm_runtime.h>
|
2014-09-29 11:58:47 +00:00
|
|
|
#include <linux/pm_domain.h>
|
2012-07-27 20:14:59 +00:00
|
|
|
#include <linux/idr.h>
|
2012-10-31 21:45:02 +00:00
|
|
|
#include <linux/acpi.h>
|
2014-06-18 15:29:32 +00:00
|
|
|
#include <linux/clk/clk-conf.h>
|
2014-06-03 00:42:58 +00:00
|
|
|
#include <linux/limits.h>
|
2015-11-30 15:11:38 +00:00
|
|
|
#include <linux/property.h>
|
2019-01-03 23:29:05 +00:00
|
|
|
#include <linux/kmemleak.h>
|
2019-12-10 22:41:37 +00:00
|
|
|
#include <linux/types.h>
|
bus: platform,amba,fsl-mc,PCI: Add device DMA ownership management
The devices on platform/amba/fsl-mc/PCI buses could be bound to drivers
with the device DMA managed by kernel drivers or user-space applications.
Unfortunately, multiple devices may be placed in the same IOMMU group
because they cannot be isolated from each other. The DMA on these devices
must either be entirely under kernel control or userspace control, never
a mixture. Otherwise the driver integrity is not guaranteed because they
could access each other through the peer-to-peer accesses which by-pass
the IOMMU protection.
This checks and sets the default DMA mode during driver binding, and
cleanups during driver unbinding. In the default mode, the device DMA is
managed by the device driver which handles DMA operations through the
kernel DMA APIs (see Documentation/core-api/dma-api.rst).
For cases where the devices are assigned for userspace control through the
userspace driver framework(i.e. VFIO), the drivers(for example, vfio_pci/
vfio_platfrom etc.) may set a new flag (driver_managed_dma) to skip this
default setting in the assumption that the drivers know what they are
doing with the device DMA.
Calling iommu_device_use_default_domain() before {of,acpi}_dma_configure
is currently a problem. As things stand, the IOMMU driver ignored the
initial iommu_probe_device() call when the device was added, since at
that point it had no fwspec yet. In this situation,
{of,acpi}_iommu_configure() are retriggering iommu_probe_device() after
the IOMMU driver has seen the firmware data via .of_xlate to learn that
it actually responsible for the given device. As the result, before
that gets fixed, iommu_use_default_domain() goes at the end, and calls
arch_teardown_dma_ops() if it fails.
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Stuart Yoder <stuyoder@gmail.com>
Cc: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20220418005000.897664-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2022-04-18 00:49:53 +00:00
|
|
|
#include <linux/iommu.h>
|
|
|
|
#include <linux/dma-map-ops.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-13 16:54:41 +00:00
|
|
|
#include "base.h"
|
2012-08-05 23:45:11 +00:00
|
|
|
#include "power/power.h"
|
2005-10-13 16:54:41 +00:00
|
|
|
|
2012-07-27 20:14:59 +00:00
|
|
|
/* For automatically allocated device IDs */
|
|
|
|
static DEFINE_IDA(platform_devid_ida);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
struct device platform_bus = {
|
2008-10-30 00:36:48 +00:00
|
|
|
.init_name = "platform",
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
2005-12-10 06:36:28 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_bus);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/**
|
2008-01-25 06:50:12 +00:00
|
|
|
* platform_get_resource - get a resource for a device
|
|
|
|
* @dev: platform device
|
|
|
|
* @type: resource type
|
|
|
|
* @num: resource index
|
2020-09-10 06:04:40 +00:00
|
|
|
*
|
|
|
|
* Return: a pointer to the resource or NULL on failure.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-01-25 06:50:12 +00:00
|
|
|
struct resource *platform_get_resource(struct platform_device *dev,
|
|
|
|
unsigned int type, unsigned int num)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-12-10 22:41:37 +00:00
|
|
|
u32 i;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
for (i = 0; i < dev->num_resources; i++) {
|
|
|
|
struct resource *r = &dev->resource[i];
|
|
|
|
|
2008-10-16 05:05:15 +00:00
|
|
|
if (type == resource_type(r) && num-- == 0)
|
|
|
|
return r;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-12-10 06:36:28 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_get_resource);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-12-09 20:36:38 +00:00
|
|
|
struct resource *platform_get_mem_or_io(struct platform_device *dev,
|
|
|
|
unsigned int num)
|
|
|
|
{
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->num_resources; i++) {
|
|
|
|
struct resource *r = &dev->resource[i];
|
|
|
|
|
|
|
|
if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
|
|
|
|
|
2019-10-22 08:43:14 +00:00
|
|
|
#ifdef CONFIG_HAS_IOMEM
|
2020-03-23 16:06:08 +00:00
|
|
|
/**
|
|
|
|
* devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
|
|
|
|
* platform device and get resource
|
|
|
|
*
|
|
|
|
* @pdev: platform device to use both for memory resource lookup as well as
|
|
|
|
* resource management
|
|
|
|
* @index: resource index
|
|
|
|
* @res: optional output parameter to store a pointer to the obtained resource.
|
2020-09-10 06:04:40 +00:00
|
|
|
*
|
|
|
|
* Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
|
|
|
|
* on failure.
|
2020-03-23 16:06:08 +00:00
|
|
|
*/
|
|
|
|
void __iomem *
|
|
|
|
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
|
|
|
|
unsigned int index, struct resource **res)
|
|
|
|
{
|
|
|
|
struct resource *r;
|
|
|
|
|
|
|
|
r = platform_get_resource(pdev, IORESOURCE_MEM, index);
|
|
|
|
if (res)
|
|
|
|
*res = r;
|
|
|
|
return devm_ioremap_resource(&pdev->dev, r);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
|
|
|
|
|
2019-02-20 11:12:39 +00:00
|
|
|
/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	/* Thin wrapper: identical lookup + remap, discarding the resource pointer. */
	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
|
2019-10-22 08:43:14 +00:00
|
|
|
|
2019-10-22 08:43:16 +00:00
|
|
|
/**
|
|
|
|
* devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
|
|
|
|
* a platform device, retrieve the
|
|
|
|
* resource by name
|
|
|
|
*
|
|
|
|
* @pdev: platform device to use both for memory resource lookup as well as
|
|
|
|
* resource management
|
|
|
|
* @name: name of the resource
|
2020-09-10 06:04:40 +00:00
|
|
|
*
|
|
|
|
* Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
|
|
|
|
* on failure.
|
2019-10-22 08:43:16 +00:00
|
|
|
*/
|
|
|
|
void __iomem *
|
|
|
|
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
struct resource *res;
|
|
|
|
|
|
|
|
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
|
|
|
|
return devm_ioremap_resource(&pdev->dev, res);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
|
2019-02-21 16:26:27 +00:00
|
|
|
#endif /* CONFIG_HAS_IOMEM */
|
2019-02-20 11:12:39 +00:00
|
|
|
|
2021-04-07 09:47:56 +00:00
|
|
|
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
	struct resource *r;

	/* First preference: an interrupt described by the DT node. */
	if (is_of_node(fwnode)) {
		ret = of_irq_get(to_of_node(fwnode), num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (is_acpi_device_node(fwnode)) {
		/* A disabled IRQ resource may still be resolvable via ACPI. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && is_acpi_device_node(fwnode)) {
		ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	/* IRQ 0 is never a valid Linux IRQ number; flag it loudly. */
	if (WARN(!ret, "0 is an invalid IRQ number\n"))
		return -EINVAL;
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
|
2019-07-30 05:38:43 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* platform_get_irq - get an IRQ for a device
|
|
|
|
* @dev: platform device
|
|
|
|
* @num: IRQ number index
|
|
|
|
*
|
|
|
|
* Gets an IRQ for a platform device and prints an error message if finding the
|
|
|
|
* IRQ fails. Device drivers should check the return value for errors so as to
|
|
|
|
* not pass a negative integer value to the request_irq() APIs.
|
|
|
|
*
|
2020-04-14 16:48:45 +00:00
|
|
|
* For example::
|
|
|
|
*
|
2019-07-30 05:38:43 +00:00
|
|
|
* int irq = platform_get_irq(pdev, 0);
|
|
|
|
* if (irq < 0)
|
|
|
|
* return irq;
|
|
|
|
*
|
2020-03-16 21:43:38 +00:00
|
|
|
* Return: non-zero IRQ number on success, negative error number on failure.
|
2019-07-30 05:38:43 +00:00
|
|
|
*/
|
|
|
|
int platform_get_irq(struct platform_device *dev, unsigned int num)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2021-04-07 09:47:56 +00:00
|
|
|
ret = platform_get_irq_optional(dev, num);
|
2021-11-05 07:15:09 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return dev_err_probe(&dev->dev, ret,
|
|
|
|
"IRQ index %u not found\n", num);
|
2019-07-30 05:38:43 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2005-12-10 06:36:28 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_get_irq);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-01-07 01:12:47 +00:00
|
|
|
/**
|
|
|
|
* platform_irq_count - Count the number of IRQs a platform device uses
|
|
|
|
* @dev: platform device
|
|
|
|
*
|
|
|
|
* Return: Number of IRQs a platform device uses or EPROBE_DEFER
|
|
|
|
*/
|
|
|
|
int platform_irq_count(struct platform_device *dev)
|
|
|
|
{
|
|
|
|
int ret, nr = 0;
|
|
|
|
|
2021-04-07 09:47:56 +00:00
|
|
|
while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
|
2016-01-07 01:12:47 +00:00
|
|
|
nr++;
|
|
|
|
|
|
|
|
if (ret == -EPROBE_DEFER)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return nr;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_irq_count);
|
|
|
|
|
2020-12-02 10:36:56 +00:00
|
|
|
/*
 * Devres payload recording the IRQ mappings created by
 * devm_platform_get_irqs_affinity(), so they can be disposed of on release.
 */
struct irq_affinity_devres {
	unsigned int count;			/* number of valid entries in irq[] */
	unsigned int irq[] __counted_by(count);	/* the mapped Linux IRQ numbers */
};
|
|
|
|
|
|
|
|
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
|
|
|
|
{
|
|
|
|
struct resource *r;
|
|
|
|
|
|
|
|
r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
|
|
|
|
if (r)
|
|
|
|
irqresource_disabled(r, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Devres release callback: dispose every IRQ mapping recorded in the
 * irq_affinity_devres payload, and for ACPI-backed devices re-disable the
 * corresponding IRQ resources so they can be re-acquired later.
 */
static void devm_platform_get_irqs_affinity_release(struct device *dev,
						    void *res)
{
	struct irq_affinity_devres *ptr = res;
	int i;

	for (i = 0; i < ptr->count; i++) {
		irq_dispose_mapping(ptr->irq[i]);

		if (is_acpi_device_node(dev_fwnode(dev)))
			platform_disable_acpi_irq(to_platform_device(dev), i);
	}
}
|
|
|
|
|
|
|
|
/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *				     device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
 * to the passed affinity descriptor
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Clamp the vector count to what the affinity descriptor allows. */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	/* Header plus the trailing flexible irq[] array of nvec entries. */
	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	/* Resolve and record each IRQ; mappings are undone by the release cb. */
	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	/* Apply the computed affinity mask to every resolved IRQ. */
	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	/* The descriptor array is only needed during setup. */
	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
|
2008-01-25 06:50:12 +00:00
|
|
|
* platform_get_resource_byname - get a resource for a device by name
|
|
|
|
* @dev: platform device
|
|
|
|
* @type: resource type
|
|
|
|
* @name: resource name
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-01-25 06:50:12 +00:00
|
|
|
struct resource *platform_get_resource_byname(struct platform_device *dev,
|
2009-04-27 00:38:16 +00:00
|
|
|
unsigned int type,
|
|
|
|
const char *name)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-12-10 22:41:37 +00:00
|
|
|
u32 i;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
for (i = 0; i < dev->num_resources; i++) {
|
|
|
|
struct resource *r = &dev->resource[i];
|
|
|
|
|
2012-08-23 14:10:00 +00:00
|
|
|
if (unlikely(!r->name))
|
|
|
|
continue;
|
|
|
|
|
2008-10-16 05:05:15 +00:00
|
|
|
if (type == resource_type(r) && !strcmp(r->name, name))
|
|
|
|
return r;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-12-10 06:36:28 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-10-05 21:04:47 +00:00
|
|
|
/*
 * Common worker for platform_get_irq_byname() and its _optional variant:
 * try the firmware node first, then fall back to a named IORESOURCE_IRQ
 * entry. Returns a positive IRQ number, or a negative error (-ENXIO when
 * no IRQ of that name exists).
 */
static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	/* Firmware (DT/ACPI) description takes precedence over resources. */
	ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
	if (ret > 0 || ret == -EPROBE_DEFER)
		return ret;

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r) {
		/* IRQ 0 is never a valid Linux IRQ number; flag it loudly. */
		if (WARN(!r->start, "0 is an invalid IRQ number\n"))
			return -EINVAL;
		return r->start;
	}

	return -ENXIO;
}
|
2019-10-05 21:04:47 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* platform_get_irq_byname - get an IRQ for a device by name
|
|
|
|
* @dev: platform device
|
|
|
|
* @name: IRQ name
|
|
|
|
*
|
|
|
|
* Get an IRQ like platform_get_irq(), but then by name rather then by index.
|
|
|
|
*
|
2020-03-16 21:43:38 +00:00
|
|
|
* Return: non-zero IRQ number on success, negative error number on failure.
|
2019-10-05 21:04:47 +00:00
|
|
|
*/
|
|
|
|
int platform_get_irq_byname(struct platform_device *dev, const char *name)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = __platform_get_irq_byname(dev, name);
|
2022-02-04 20:25:23 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
|
|
|
|
name);
|
2019-10-05 21:04:47 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2005-12-10 06:36:28 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-10-05 21:04:47 +00:00
|
|
|
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	/* Same lookup as platform_get_irq_byname(), just without logging. */
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Return: 0 on success, negative error number on failure.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		int ret = platform_device_register(devs[i]);

		if (ret) {
			/* Roll back everything registered so far. */
			while (i-- > 0)
				platform_device_unregister(devs[i]);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-11-05 21:19:33 +00:00
|
|
|
/*
 * Container that bundles a platform_device with the storage backing its
 * name. 'name' is a C99 flexible array member so the struct carries no
 * trailing padding for it; allocators must reserve strlen(name) + 1 extra
 * bytes for the string and its NUL terminator.
 */
struct platform_object {
	struct platform_device pdev;
	char name[];
};
|
|
|
|
|
2019-08-16 06:24:35 +00:00
|
|
|
/*
|
|
|
|
* Set up default DMA mask for platform devices if the they weren't
|
|
|
|
* previously set by the architecture / DT.
|
|
|
|
*/
|
|
|
|
static void setup_pdev_dma_masks(struct platform_device *pdev)
|
|
|
|
{
|
2020-04-22 10:09:54 +00:00
|
|
|
pdev->dev.dma_parms = &pdev->dma_parms;
|
|
|
|
|
2019-08-16 06:24:35 +00:00
|
|
|
if (!pdev->dev.coherent_dma_mask)
|
|
|
|
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
|
driver code: clarify and fix platform device DMA mask allocation
This does three inter-related things to clarify the usage of the
platform device dma_mask field. In the process, fix the bug introduced
by cdfee5623290 ("driver core: initialize a default DMA mask for
platform device") that caused Artem Tashkinov's laptop to not boot with
newer Fedora kernels.
This does:
- First off, rename the field to "platform_dma_mask" to make it
greppable.
We have way too many different random fields called "dma_mask" in
various data structures, where some of them are actual masks, and
some of them are just pointers to the mask. And the structures all
have pointers to each other, or embed each other inside themselves,
and "pdev" sometimes means "platform device" and sometimes it means
"PCI device".
So to make it clear in the code when you actually use this new field,
give it a unique name (it really should be something even more unique
like "platform_device_dma_mask", since it's per platform device, not
per platform, but that gets old really fast, and this is unique
enough in context).
To further clarify when the field gets used, initialize it when we
actually start using it with the default value.
- Then, use this field instead of the random one-off allocation in
platform_device_register_full() that is now unnecessary since we now
already have a perfectly fine allocation for it in the platform
device structure.
- The above then allows us to fix the actual bug, where the error path
of platform_device_register_full() would unconditionally free the
platform device DMA allocation with 'kfree()'.
That kfree() was dont regardless of whether the allocation had been
done earlier with the (now removed) kmalloc, or whether
setup_pdev_dma_masks() had already been used and the dma_mask pointer
pointed to the mask that was part of the platform device.
It seems most people never triggered the error path, or only triggered
it from a call chain that set an explicit pdevinfo->dma_mask value (and
thus caused the unnecessary allocation that was "cleaned up" in the
error path) before calling platform_device_register_full().
Robin Murphy points out that in Artem's case the wdat_wdt driver failed
in platform_device_add(), and that was the one that had called
platform_device_register_full() with pdevinfo.dma_mask = 0, and would
have caused that kfree() of pdev.dma_mask corrupting the heap.
A later unrelated kmalloc() then oopsed due to the heap corruption.
Fixes: cdfee5623290 ("driver core: initialize a default DMA mask for platform device")
Reported-bisected-and-tested-by: Artem S. Tashkinov <aros@gmx.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-03-11 16:07:10 +00:00
|
|
|
if (!pdev->dev.dma_mask) {
|
|
|
|
pdev->platform_dma_mask = DMA_BIT_MASK(32);
|
|
|
|
pdev->dev.dma_mask = &pdev->platform_dma_mask;
|
|
|
|
}
|
2019-08-16 06:24:35 +00:00
|
|
|
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
|
2010-02-14 14:18:53 +00:00
|
|
|
* platform_device_put - destroy a platform device
|
2008-01-25 06:50:12 +00:00
|
|
|
* @pdev: platform device to free
|
2005-11-05 21:19:33 +00:00
|
|
|
*
|
2008-01-25 06:50:12 +00:00
|
|
|
* Free all memory associated with a platform device. This function must
|
|
|
|
* _only_ be externally called in error cases. All other usage is a bug.
|
2005-11-05 21:19:33 +00:00
|
|
|
*/
|
|
|
|
void platform_device_put(struct platform_device *pdev)
|
|
|
|
{
|
2018-12-03 18:21:41 +00:00
|
|
|
if (!IS_ERR_OR_NULL(pdev))
|
2005-11-05 21:19:33 +00:00
|
|
|
put_device(&pdev->dev);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_device_put);
|
|
|
|
|
|
|
|
static void platform_device_release(struct device *dev)
|
|
|
|
{
|
2008-01-25 06:50:12 +00:00
|
|
|
struct platform_object *pa = container_of(dev, struct platform_object,
|
|
|
|
pdev.dev);
|
2005-11-05 21:19:33 +00:00
|
|
|
|
2021-02-11 23:27:45 +00:00
|
|
|
of_node_put(pa->pdev.dev.of_node);
|
2005-11-05 21:19:33 +00:00
|
|
|
kfree(pa->pdev.dev.platform_data);
|
2011-04-07 22:43:01 +00:00
|
|
|
kfree(pa->pdev.mfd_cell);
|
2005-11-05 21:19:33 +00:00
|
|
|
kfree(pa->pdev.resource);
|
2014-06-03 00:42:58 +00:00
|
|
|
kfree(pa->pdev.driver_override);
|
2005-11-05 21:19:33 +00:00
|
|
|
kfree(pa);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2010-02-14 14:18:53 +00:00
|
|
|
* platform_device_alloc - create a platform device
|
2008-01-25 06:50:12 +00:00
|
|
|
* @name: base name of the device we're adding
|
|
|
|
* @id: instance id
|
2005-11-05 21:19:33 +00:00
|
|
|
*
|
2008-01-25 06:50:12 +00:00
|
|
|
* Create a platform device object which can have other objects attached
|
|
|
|
* to it, and which will have attached objects freed when it is released.
|
2005-11-05 21:19:33 +00:00
|
|
|
*/
|
2007-09-09 10:54:16 +00:00
|
|
|
struct platform_device *platform_device_alloc(const char *name, int id)
|
2005-11-05 21:19:33 +00:00
|
|
|
{
|
|
|
|
struct platform_object *pa;
|
|
|
|
|
driver core/platform: remove unused implicit padding in platform_object
Up to 7 bytes are wasted at the end of struct platform_object
in the form of padding after name field: unfortunately this
padding is not used when allocating the memory to hold the
name.
This patch converts name array from name[1] to C99 flexible
array name[] (equivalent to name[0]) so that no padding is
required by the presence of this field. Memory allocation
is updated to take care of allocating an additional byte for
the NUL terminating character.
Built on Fedora 20, using GCC 4.8, for ARM, i386, SPARC64 and
x86_64 architectures, the data structure layout can be reported
with following command:
$ pahole drivers/base/platform.o \
--recursive \
--class_name device,pdev_archdata,platform_device,platform_object
Please find below some comparisons of structure layout for arm,
i386, sparc64 and x86_64 architecture before and after the patch.
--- obj-arm/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.290960701 +0200
+++ obj-arm/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.851988347 +0200
@@ -81,10 +81,9 @@
/* XXX last struct has 4 bytes of padding */
/* --- cacheline 6 boundary (384 bytes) was 8 bytes ago --- */
- char name[1]; /* 392 1 */
+ char name[0]; /* 392 0 */
- /* size: 400, cachelines: 7, members: 2 */
- /* padding: 7 */
+ /* size: 392, cachelines: 7, members: 2 */
/* paddings: 1, sum paddings: 4 */
- /* last cacheline: 16 bytes */
+ /* last cacheline: 8 bytes */
};
--- obj-i386/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.305960691 +0200
+++ obj-i386/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.875988332 +0200
@@ -73,9 +73,8 @@
struct platform_object {
struct platform_device pdev; /* 0 396 */
/* --- cacheline 6 boundary (384 bytes) was 12 bytes ago --- */
- char name[1]; /* 396 1 */
+ char name[0]; /* 396 0 */
- /* size: 400, cachelines: 7, members: 2 */
- /* padding: 3 */
- /* last cacheline: 16 bytes */
+ /* size: 396, cachelines: 7, members: 2 */
+ /* last cacheline: 12 bytes */
};
--- obj-sparc64/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.406960625 +0200
+++ obj-sparc64/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:20.971988269 +0200
@@ -94,9 +94,8 @@
struct platform_object {
struct platform_device pdev; /* 0 2208 */
/* --- cacheline 34 boundary (2176 bytes) was 32 bytes ago --- */
- char name[1]; /* 2208 1 */
+ char name[0]; /* 2208 0 */
- /* size: 2216, cachelines: 35, members: 2 */
- /* padding: 7 */
- /* last cacheline: 40 bytes */
+ /* size: 2208, cachelines: 35, members: 2 */
+ /* last cacheline: 32 bytes */
};
--- obj-x86_64/drivers/base/platform.o.pahole.v3.15-rc7-79-gfe45736f4134 2014-05-30 10:32:06.432960608 +0200
+++ obj-x86_64/drivers/base/platform.o.pahole.v3.15-rc7-80-g2cdb06858d71 2014-05-30 11:26:21.000988250 +0200
@@ -84,9 +84,8 @@
struct platform_object {
struct platform_device pdev; /* 0 720 */
/* --- cacheline 11 boundary (704 bytes) was 16 bytes ago --- */
- char name[1]; /* 720 1 */
+ char name[0]; /* 720 0 */
- /* size: 728, cachelines: 12, members: 2 */
- /* padding: 7 */
- /* last cacheline: 24 bytes */
+ /* size: 720, cachelines: 12, members: 2 */
+ /* last cacheline: 16 bytes */
};
Changes from v5 [1]:
- dropped dma_mask allocation changes and only kept padding
removal changes (name array length set to 0).
Changes from v4 [2]:
[by Emil Goode <emilgoode@gmail.com>:]
- Split v4 of the patch into two separate patches.
- Generated new object file size and data structure layout info.
- Updated the changelog message.
Changes from v3 [3]:
- fixed commit message so that git am doesn't fail.
Changes from v2 [4]:
- move 'dma_mask' to platform_object so that it's always
allocated and won't leak on release; remove all previously
added support functions.
- use C99 flexible array member for 'name' to remove padding
at the end of platform_object.
Changes from v1 [5]:
- remove unneeded kfree() from error path
- add reference to author/commit adding allocation of dmamask
Changes from v0 [6]:
- small rewrite to squeeze the patch to a bare minimal
[1] http://lkml.kernel.org/r/1401122483-31603-2-git-send-email-emilgoode@gmail.com
http://lkml.kernel.org/r/1401122483-31603-1-git-send-email-emilgoode@gmail.com
http://lkml.kernel.org/r/1401122483-31603-3-git-send-email-emilgoode@gmail.com
[2] http://lkml.kernel.org/r/1390817152-30898-1-git-send-email-ydroneaud@opteya.com
https://patchwork.kernel.org/patch/3541871/
[3] http://lkml.kernel.org/r/1390771138-28348-1-git-send-email-ydroneaud@opteya.com
https://patchwork.kernel.org/patch/3540081/
[4] http://lkml.kernel.org/r/1389683909-17495-1-git-send-email-ydroneaud@opteya.com
https://patchwork.kernel.org/patch/3484411/
[5] http://lkml.kernel.org/r/1389649085-7365-1-git-send-email-ydroneaud@opteya.com
https://patchwork.kernel.org/patch/3480961/
[6] http://lkml.kernel.org/r/1386886207-2735-1-git-send-email-ydroneaud@opteya.com
Cc: Emil Goode <emilgoode@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Shawn Guo <shawn.guo@freescale.com>
Cc: Sascha Hauer <kernel@pengutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Olof Johansson <olof@lixom.net>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Signed-off-by: Yann Droneaud <ydroneaud@opteya.com>
Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-05-30 20:02:47 +00:00
|
|
|
pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
|
2005-11-05 21:19:33 +00:00
|
|
|
if (pa) {
|
|
|
|
strcpy(pa->name, name);
|
|
|
|
pa->pdev.name = pa->name;
|
|
|
|
pa->pdev.id = id;
|
|
|
|
device_initialize(&pa->pdev.dev);
|
|
|
|
pa->pdev.dev.release = platform_device_release;
|
2019-08-16 06:24:35 +00:00
|
|
|
setup_pdev_dma_masks(&pa->pdev);
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
|
|
|
|
2005-12-10 06:36:27 +00:00
|
|
|
return pa ? &pa->pdev : NULL;
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_device_alloc);
|
|
|
|
|
|
|
|
/**
|
2010-02-14 14:18:53 +00:00
|
|
|
* platform_device_add_resources - add resources to a platform device
|
2008-01-25 06:50:12 +00:00
|
|
|
* @pdev: platform device allocated by platform_device_alloc to add resources to
|
|
|
|
* @res: set of resources that needs to be allocated for the device
|
|
|
|
* @num: number of resources
|
2005-11-05 21:19:33 +00:00
|
|
|
*
|
2008-01-25 06:50:12 +00:00
|
|
|
* Add a copy of the resources to the platform device. The memory
|
|
|
|
* associated with the resources will be freed when the platform device is
|
|
|
|
* released.
|
2005-11-05 21:19:33 +00:00
|
|
|
*/
|
2008-01-25 06:50:12 +00:00
|
|
|
int platform_device_add_resources(struct platform_device *pdev,
|
2009-01-28 20:01:02 +00:00
|
|
|
const struct resource *res, unsigned int num)
|
2005-11-05 21:19:33 +00:00
|
|
|
{
|
2011-04-20 07:44:44 +00:00
|
|
|
struct resource *r = NULL;
|
2005-11-05 21:19:33 +00:00
|
|
|
|
2011-04-20 07:44:44 +00:00
|
|
|
if (res) {
|
|
|
|
r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
|
|
|
|
if (!r)
|
|
|
|
return -ENOMEM;
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
2011-04-20 07:44:44 +00:00
|
|
|
|
2011-04-20 07:44:45 +00:00
|
|
|
kfree(pdev->resource);
|
2011-04-20 07:44:44 +00:00
|
|
|
pdev->resource = r;
|
|
|
|
pdev->num_resources = num;
|
|
|
|
return 0;
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_device_add_resources);
|
|
|
|
|
|
|
|
/**
|
2010-02-14 14:18:53 +00:00
|
|
|
* platform_device_add_data - add platform-specific data to a platform device
|
2008-01-25 06:50:12 +00:00
|
|
|
* @pdev: platform device allocated by platform_device_alloc to add resources to
|
|
|
|
* @data: platform specific data for this platform device
|
|
|
|
* @size: size of platform specific data
|
2005-11-05 21:19:33 +00:00
|
|
|
*
|
2008-01-25 06:50:12 +00:00
|
|
|
* Add a copy of platform specific data to the platform device's
|
|
|
|
* platform_data pointer. The memory associated with the platform data
|
|
|
|
* will be freed when the platform device is released.
|
2005-11-05 21:19:33 +00:00
|
|
|
*/
|
2008-01-25 06:50:12 +00:00
|
|
|
int platform_device_add_data(struct platform_device *pdev, const void *data,
|
|
|
|
size_t size)
|
2005-11-05 21:19:33 +00:00
|
|
|
{
|
2011-04-20 07:44:42 +00:00
|
|
|
void *d = NULL;
|
2010-09-07 13:31:49 +00:00
|
|
|
|
2011-04-20 07:44:42 +00:00
|
|
|
if (data) {
|
|
|
|
d = kmemdup(data, size, GFP_KERNEL);
|
|
|
|
if (!d)
|
|
|
|
return -ENOMEM;
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
2011-04-20 07:44:42 +00:00
|
|
|
|
2011-04-20 07:44:43 +00:00
|
|
|
kfree(pdev->dev.platform_data);
|
2011-04-20 07:44:42 +00:00
|
|
|
pdev->dev.platform_data = d;
|
|
|
|
return 0;
|
2005-11-05 21:19:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_device_add_data);
|
|
|
|
|
|
|
|
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 i;
	int ret;

	/* Devices with no explicit parent hang off the virtual platform bus. */
	if (!dev->parent)
		dev->parent = &platform_bus;

	dev->bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		/* Single-instance device: name carries no id suffix. */
		dev_set_name(dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			return ret;
		pdev->id = ret;
		/* id_auto tells the error path / del path to return the ID. */
		pdev->id_auto = true;
		dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each MEM/IO resource under its parent in the resource tree. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		/* Resources of other types (e.g. IRQ) are left unregistered. */
		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
		 dev_name(dev->parent));

	ret = device_add(dev);
	if (ret)
		goto failed;

	return 0;

failed:
	/* Give an automatically allocated ID back to the IDA. */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/*
	 * Unwind only the resources inserted so far: i indexes the first
	 * resource that was NOT successfully claimed (or num_resources if
	 * device_add() failed), and r->parent is set iff it was inserted.
	 */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
|
|
|
|
|
|
|
|
/**
|
2008-01-25 06:50:12 +00:00
|
|
|
* platform_device_del - remove a platform-level device
|
|
|
|
* @pdev: platform device we're removing
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2008-01-25 06:50:12 +00:00
|
|
|
* Note that this function will also release all memory- and port-based
|
|
|
|
* resources owned by the device (@dev->resource). This function must
|
|
|
|
* _only_ be externally called in error cases. All other usage is a bug.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-12-10 06:36:27 +00:00
|
|
|
void platform_device_del(struct platform_device *pdev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-12-10 22:41:37 +00:00
|
|
|
u32 i;
|
2008-10-16 05:05:15 +00:00
|
|
|
|
2018-12-03 18:21:41 +00:00
|
|
|
if (!IS_ERR_OR_NULL(pdev)) {
|
2015-06-10 15:36:50 +00:00
|
|
|
device_del(&pdev->dev);
|
|
|
|
|
|
|
|
if (pdev->id_auto) {
|
2020-09-09 18:02:48 +00:00
|
|
|
ida_free(&platform_devid_ida, pdev->id);
|
2015-06-10 15:36:50 +00:00
|
|
|
pdev->id = PLATFORM_DEVID_AUTO;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < pdev->num_resources; i++) {
|
|
|
|
struct resource *r = &pdev->resource[i];
|
drivercore: Fix unregistration path of platform devices
The unregister path of platform_device is broken. On registration, it
will register all resources with either a parent already set, or
type==IORESOURCE_{IO,MEM}. However, on unregister it will release
everything with type==IORESOURCE_{IO,MEM}, but ignore the others. There
are also cases where resources don't get registered in the first place,
like with devices created by of_platform_populate()*.
Fix the unregister path to be symmetrical with the register path by
checking the parent pointer instead of the type field to decide which
resources to unregister. This is safe because the upshot of the
registration path algorithm is that registered resources have a parent
pointer, and non-registered resources do not.
* It can be argued that of_platform_populate() should be registering
it's resources, and they argument has some merit. However, there are
quite a few platforms that end up broken if we try to do that due to
overlapping resources in the device tree. Until that is fixed, we need
to solve the immediate problem.
Cc: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: Rob Herring <robh@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Signed-off-by: Grant Likely <grant.likely@linaro.org>
Tested-by: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Cc: stable@vger.kernel.org
Signed-off-by: Rob Herring <robh@kernel.org>
2015-06-07 14:20:11 +00:00
|
|
|
if (r->parent)
|
2015-06-10 15:36:50 +00:00
|
|
|
release_resource(r);
|
|
|
|
}
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-12-10 06:36:27 +00:00
|
|
|
EXPORT_SYMBOL_GPL(platform_device_del);
|
|
|
|
|
|
|
|
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 * returned an error! Always use platform_device_put() to give up the
 * reference initialised in this function instead.
 */
int platform_device_register(struct platform_device *pdev)
{
	/* Take the initial reference; released via platform_device_put(). */
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
|
2005-12-10 06:36:27 +00:00
|
|
|
|
|
|
|
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take a DT node reference; dropped in platform_device_release(). */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * Override the default mask set up by
		 * platform_device_alloc(); storage lives in the platform
		 * device itself, so nothing extra to allocate or free.
		 */
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
					pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
		/* err label sits inside the if so success falls through. */
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		/* Single teardown path: the put frees everything attached. */
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
|
2008-09-22 21:41:40 +00:00
|
|
|
|
2005-11-09 17:23:39 +00:00
|
|
|
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 *
 * Hooks @drv up to the platform bus and hands it to the driver core.
 * Normally called through the platform_driver_register() wrapper, which
 * supplies THIS_MODULE as @owner.
 *
 * Returns 0 on success or a negative error code from driver_register().
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
|
2005-11-09 17:23:39 +00:00
|
|
|
|
|
|
|
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 *
 * Reverses the effect of __platform_driver_register(); any devices bound
 * to @drv are unbound by the driver core before it returns.
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
|
|
|
|
|
2020-11-19 12:46:10 +00:00
|
|
|
static int platform_probe_fail(struct platform_device *pdev)
|
2020-11-19 12:46:09 +00:00
|
|
|
{
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
2023-01-31 08:24:59 +00:00
|
|
|
static int is_bound_to_driver(struct device *dev, void *driver)
|
|
|
|
{
|
|
|
|
if (dev->driver == driver)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-11-17 07:28:47 +00:00
|
|
|
/**
|
2014-10-28 16:40:41 +00:00
|
|
|
* __platform_driver_probe - register driver for non-hotpluggable device
|
2006-11-17 07:28:47 +00:00
|
|
|
* @drv: platform driver structure
|
2013-09-23 14:27:26 +00:00
|
|
|
* @probe: the driver probe routine, probably from an __init section
|
2014-10-28 16:40:41 +00:00
|
|
|
* @module: module which will be the owner of the driver
|
2006-11-17 07:28:47 +00:00
|
|
|
*
|
|
|
|
* Use this instead of platform_driver_register() when you know the device
|
|
|
|
* is not hotpluggable and has already been registered, and you want to
|
|
|
|
* remove its run-once probe() infrastructure from memory after the driver
|
|
|
|
* has bound to the device.
|
|
|
|
*
|
|
|
|
* One typical use for this would be with drivers for controllers integrated
|
|
|
|
* into system-on-chip processors, where the controller devices have been
|
|
|
|
* configured as part of board setup.
|
|
|
|
*
|
2013-09-23 14:27:26 +00:00
|
|
|
* Note that this is incompatible with deferred probing.
|
2013-03-26 09:35:15 +00:00
|
|
|
*
|
2006-11-17 07:28:47 +00:00
|
|
|
* Returns zero if the driver registered and bound to a device, else returns
|
|
|
|
* a negative error code and with the driver not registered.
|
|
|
|
*/
|
2014-10-28 16:40:41 +00:00
|
|
|
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
|
|
|
|
int (*probe)(struct platform_device *), struct module *module)
|
2006-11-17 07:28:47 +00:00
|
|
|
{
|
2023-01-31 08:24:58 +00:00
|
|
|
int retval;
|
2006-11-17 07:28:47 +00:00
|
|
|
|
2015-03-30 23:20:07 +00:00
|
|
|
if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
|
|
|
|
pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
|
|
|
|
drv->driver.name, __func__);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have to run our probes synchronously because we check if
|
|
|
|
* we find any devices to bind to and exit with error if there
|
|
|
|
* are any.
|
|
|
|
*/
|
|
|
|
drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
|
|
|
|
|
2013-09-23 14:27:26 +00:00
|
|
|
/*
|
|
|
|
* Prevent driver from requesting probe deferral to avoid further
|
|
|
|
* futile probe attempts.
|
|
|
|
*/
|
|
|
|
drv->prevent_deferred_probe = true;
|
|
|
|
|
2009-10-13 03:17:41 +00:00
|
|
|
/* make sure driver won't have bind/unbind attributes */
|
|
|
|
drv->driver.suppress_bind_attrs = true;
|
|
|
|
|
2006-11-17 07:28:47 +00:00
|
|
|
/* temporary section violation during probe() */
|
|
|
|
drv->probe = probe;
|
2023-01-31 08:24:58 +00:00
|
|
|
retval = __platform_driver_register(drv, module);
|
2020-04-08 21:40:03 +00:00
|
|
|
if (retval)
|
|
|
|
return retval;
|
2006-11-17 07:28:47 +00:00
|
|
|
|
2023-01-31 08:24:59 +00:00
|
|
|
/* Force all new probes of this driver to fail */
|
2020-11-19 12:46:10 +00:00
|
|
|
drv->probe = platform_probe_fail;
|
2006-11-17 07:28:47 +00:00
|
|
|
|
2023-01-31 08:24:59 +00:00
|
|
|
/* Walk all platform devices and see if any actually bound to this driver.
|
|
|
|
* If not, return an error as the device should have done so by now.
|
|
|
|
*/
|
|
|
|
if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
|
|
|
|
retval = -ENODEV;
|
2006-11-17 07:28:47 +00:00
|
|
|
platform_driver_unregister(drv);
|
2023-01-31 08:24:59 +00:00
|
|
|
}
|
|
|
|
|
2006-11-17 07:28:47 +00:00
|
|
|
return retval;
|
|
|
|
}
|
2014-10-28 16:40:41 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__platform_driver_probe);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-30 04:11:20 +00:00
|
|
|
/**
|
2014-10-28 16:40:42 +00:00
|
|
|
* __platform_create_bundle - register driver and create corresponding device
|
2009-12-30 04:11:20 +00:00
|
|
|
* @driver: platform driver structure
|
|
|
|
* @probe: the driver probe routine, probably from an __init section
|
|
|
|
* @res: set of resources that needs to be allocated for the device
|
|
|
|
* @n_res: number of resources
|
|
|
|
* @data: platform specific data for this platform device
|
|
|
|
* @size: size of platform specific data
|
2014-10-28 16:40:42 +00:00
|
|
|
* @module: module which will be the owner of the driver
|
2009-12-30 04:11:20 +00:00
|
|
|
*
|
|
|
|
* Use this in legacy-style modules that probe hardware directly and
|
|
|
|
* register a single platform device and corresponding platform driver.
|
2010-03-11 16:11:45 +00:00
|
|
|
*
|
|
|
|
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
|
2009-12-30 04:11:20 +00:00
|
|
|
*/
|
2014-10-28 16:40:42 +00:00
|
|
|
struct platform_device * __init_or_module __platform_create_bundle(
|
2009-12-30 04:11:20 +00:00
|
|
|
struct platform_driver *driver,
|
|
|
|
int (*probe)(struct platform_device *),
|
|
|
|
struct resource *res, unsigned int n_res,
|
2014-10-28 16:40:42 +00:00
|
|
|
const void *data, size_t size, struct module *module)
|
2009-12-30 04:11:20 +00:00
|
|
|
{
|
|
|
|
struct platform_device *pdev;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
pdev = platform_device_alloc(driver->driver.name, -1);
|
|
|
|
if (!pdev) {
|
|
|
|
error = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2010-09-07 13:31:54 +00:00
|
|
|
error = platform_device_add_resources(pdev, res, n_res);
|
|
|
|
if (error)
|
|
|
|
goto err_pdev_put;
|
2009-12-30 04:11:20 +00:00
|
|
|
|
2010-09-07 13:31:54 +00:00
|
|
|
error = platform_device_add_data(pdev, data, size);
|
|
|
|
if (error)
|
|
|
|
goto err_pdev_put;
|
2009-12-30 04:11:20 +00:00
|
|
|
|
|
|
|
error = platform_device_add(pdev);
|
|
|
|
if (error)
|
|
|
|
goto err_pdev_put;
|
|
|
|
|
2014-10-28 16:40:42 +00:00
|
|
|
error = __platform_driver_probe(driver, probe, module);
|
2009-12-30 04:11:20 +00:00
|
|
|
if (error)
|
|
|
|
goto err_pdev_del;
|
|
|
|
|
|
|
|
return pdev;
|
|
|
|
|
|
|
|
err_pdev_del:
|
|
|
|
platform_device_del(pdev);
|
|
|
|
err_pdev_put:
|
|
|
|
platform_device_put(pdev);
|
|
|
|
err_out:
|
|
|
|
return ERR_PTR(error);
|
|
|
|
}
|
2014-10-28 16:40:42 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__platform_create_bundle);
|
2009-12-30 04:11:20 +00:00
|
|
|
|
2015-09-25 15:29:04 +00:00
|
|
|
/**
|
|
|
|
* __platform_register_drivers - register an array of platform drivers
|
|
|
|
* @drivers: an array of drivers to register
|
|
|
|
* @count: the number of drivers to register
|
|
|
|
* @owner: module owning the drivers
|
|
|
|
*
|
|
|
|
* Registers platform drivers specified by an array. On failure to register a
|
|
|
|
* driver, all previously registered drivers will be unregistered. Callers of
|
|
|
|
* this API should use platform_unregister_drivers() to unregister drivers in
|
|
|
|
* the reverse order.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success or a negative error code on failure.
|
|
|
|
*/
|
|
|
|
int __platform_register_drivers(struct platform_driver * const *drivers,
|
|
|
|
unsigned int count, struct module *owner)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
pr_debug("registering platform driver %ps\n", drivers[i]);
|
|
|
|
|
|
|
|
err = __platform_driver_register(drivers[i], owner);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_err("failed to register platform driver %ps: %d\n",
|
|
|
|
drivers[i], err);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
while (i--) {
|
|
|
|
pr_debug("unregistering platform driver %ps\n", drivers[i]);
|
|
|
|
platform_driver_unregister(drivers[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__platform_register_drivers);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* platform_unregister_drivers - unregister an array of platform drivers
|
|
|
|
* @drivers: an array of drivers to unregister
|
|
|
|
* @count: the number of drivers to unregister
|
|
|
|
*
|
2020-05-20 14:12:02 +00:00
|
|
|
* Unregisters platform drivers specified by an array. This is typically used
|
2015-09-25 15:29:04 +00:00
|
|
|
* to complement an earlier call to platform_register_drivers(). Drivers are
|
|
|
|
* unregistered in the reverse order in which they were registered.
|
|
|
|
*/
|
|
|
|
void platform_unregister_drivers(struct platform_driver * const *drivers,
|
|
|
|
unsigned int count)
|
|
|
|
{
|
|
|
|
while (count--) {
|
|
|
|
pr_debug("unregistering platform driver %ps\n", drivers[count]);
|
|
|
|
platform_driver_unregister(drivers[count]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
|
|
|
|
|
2009-02-04 03:52:40 +00:00
|
|
|
static const struct platform_device_id *platform_match_id(
|
2010-01-26 08:35:00 +00:00
|
|
|
const struct platform_device_id *id,
|
2009-02-04 03:52:40 +00:00
|
|
|
struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
while (id->name[0]) {
|
2019-04-29 17:49:21 +00:00
|
|
|
if (strcmp(pdev->name, id->name) == 0) {
|
2009-02-04 03:52:40 +00:00
|
|
|
pdev->id_entry = id;
|
|
|
|
return id;
|
|
|
|
}
|
|
|
|
id++;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2008-05-20 23:40:43 +00:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
|
|
|
|
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2009-06-04 20:13:33 +00:00
|
|
|
struct platform_driver *pdrv = to_platform_driver(dev->driver);
|
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2009-06-04 20:13:33 +00:00
|
|
|
if (dev->driver && pdrv->suspend)
|
|
|
|
ret = pdrv->suspend(pdev, mesg);
|
2006-09-03 20:16:45 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-05-20 23:40:43 +00:00
|
|
|
static int platform_legacy_resume(struct device *dev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2009-06-04 20:13:33 +00:00
|
|
|
struct platform_driver *pdrv = to_platform_driver(dev->driver);
|
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2009-06-04 20:13:33 +00:00
|
|
|
if (dev->driver && pdrv->resume)
|
|
|
|
ret = pdrv->resume(pdev);
|
2005-10-28 16:52:56 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
#endif /* CONFIG_PM_SLEEP */
|
2009-08-20 18:25:32 +00:00
|
|
|
|
2008-05-20 23:40:43 +00:00
|
|
|
#ifdef CONFIG_SUSPEND
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_suspend(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-10-06 20:46:05 +00:00
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
2008-05-20 23:40:43 +00:00
|
|
|
if (drv->pm->suspend)
|
|
|
|
ret = drv->pm->suspend(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_resume(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-10-06 20:46:05 +00:00
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
2008-05-20 23:40:43 +00:00
|
|
|
if (drv->pm->resume)
|
|
|
|
ret = drv->pm->resume(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_resume(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
#endif /* CONFIG_SUSPEND */
|
2008-05-20 23:40:43 +00:00
|
|
|
|
2011-04-11 20:54:42 +00:00
|
|
|
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
2008-05-20 23:40:43 +00:00
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_freeze(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
|
|
|
if (drv->pm->freeze)
|
|
|
|
ret = drv->pm->freeze(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_suspend(dev, PMSG_FREEZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_thaw(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-10-06 20:46:05 +00:00
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
2008-05-20 23:40:43 +00:00
|
|
|
if (drv->pm->thaw)
|
|
|
|
ret = drv->pm->thaw(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_resume(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_poweroff(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-10-06 20:46:05 +00:00
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
2008-05-20 23:40:43 +00:00
|
|
|
if (drv->pm->poweroff)
|
|
|
|
ret = drv->pm->poweroff(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
int platform_pm_restore(struct device *dev)
|
2008-05-20 23:40:43 +00:00
|
|
|
{
|
|
|
|
struct device_driver *drv = dev->driver;
|
|
|
|
int ret = 0;
|
|
|
|
|
2008-10-06 20:46:05 +00:00
|
|
|
if (!drv)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (drv->pm) {
|
2008-05-20 23:40:43 +00:00
|
|
|
if (drv->pm->restore)
|
|
|
|
ret = drv->pm->restore(dev);
|
|
|
|
} else {
|
|
|
|
ret = platform_legacy_resume(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-04-28 22:36:05 +00:00
|
|
|
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
2008-05-20 23:40:43 +00:00
|
|
|
|
2020-11-19 12:46:09 +00:00
|
|
|
/* modalias support enables more hands-off userspace setup:
|
|
|
|
* (a) environment variable lets new-style hotplug events work once system is
|
|
|
|
* fully running: "modprobe $MODALIAS"
|
|
|
|
* (b) sysfs attribute lets new-style coldplug recover from hotplug events
|
|
|
|
* mishandled before system is fully running: "modprobe $(cat modalias)"
|
|
|
|
*/
|
|
|
|
static ssize_t modalias_show(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buf)
|
|
|
|
{
|
|
|
|
struct platform_device *pdev = to_platform_device(dev);
|
|
|
|
int len;
|
|
|
|
|
|
|
|
len = of_device_modalias(dev, buf, PAGE_SIZE);
|
|
|
|
if (len != -ENODEV)
|
|
|
|
return len;
|
|
|
|
|
|
|
|
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
|
|
|
|
if (len != -ENODEV)
|
|
|
|
return len;
|
|
|
|
|
|
|
|
return sysfs_emit(buf, "platform:%s\n", pdev->name);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR_RO(modalias);
|
|
|
|
|
|
|
|
/* sysfs: NUMA node of the device (hidden by platform_dev_attrs_visible()
 * when the device has no NUMA affinity). */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	int node = dev_to_node(dev);

	return sysfs_emit(buf, "%d\n", node);
}
static DEVICE_ATTR_RO(numa_node);
|
|
|
|
|
|
|
|
/* sysfs: show the current driver_override string, if any. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	/* device_lock() serializes against driver_override_store()
	 * replacing/freeing the string while we format it. */
	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}
|
|
|
|
|
|
|
|
/* sysfs: set or clear driver_override, which restricts matching to the
 * named driver only (see platform_match()). */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret = driver_set_override(dev, &pdev->driver_override, buf, count);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(driver_override);
|
|
|
|
|
|
|
|
/* Default sysfs attributes present on every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
|
|
|
|
|
|
|
|
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
|
|
|
|
int n)
|
|
|
|
{
|
|
|
|
struct device *dev = container_of(kobj, typeof(*dev), kobj);
|
|
|
|
|
|
|
|
if (a == &dev_attr_numa_node.attr &&
|
|
|
|
dev_to_node(dev) == NUMA_NO_NODE)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return a->mode;
|
|
|
|
}
|
|
|
|
|
2021-05-28 21:34:08 +00:00
|
|
|
static const struct attribute_group platform_dev_group = {
|
2020-11-19 12:46:09 +00:00
|
|
|
.attrs = platform_dev_attrs,
|
|
|
|
.is_visible = platform_dev_attrs_visible,
|
|
|
|
};
|
|
|
|
__ATTRIBUTE_GROUPS(platform_dev);
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 *
 * Match priority: driver_override (exclusive), then OF, then ACPI, then
 * the driver's id_table, and finally a plain name comparison.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}
|
|
|
|
|
2023-01-11 11:30:17 +00:00
|
|
|
static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
|
2020-11-19 12:46:09 +00:00
|
|
|
{
|
2023-01-11 11:30:17 +00:00
|
|
|
const struct platform_device *pdev = to_platform_device(dev);
|
2020-11-19 12:46:09 +00:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* Some devices have extra OF data and an OF-style MODALIAS */
|
|
|
|
rc = of_device_uevent_modalias(dev, env);
|
|
|
|
if (rc != -ENODEV)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
rc = acpi_device_uevent_modalias(dev, env);
|
|
|
|
if (rc != -ENODEV)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
|
|
|
|
pdev->name);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-11-19 12:46:11 +00:00
|
|
|
static int platform_probe(struct device *_dev)
|
|
|
|
{
|
|
|
|
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
|
|
|
struct platform_device *dev = to_platform_device(_dev);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A driver registered using platform_driver_probe() cannot be bound
|
|
|
|
* again later because the probe function usually lives in __init code
|
|
|
|
* and so is gone. For these drivers .probe is set to
|
|
|
|
* platform_probe_fail in __platform_driver_probe(). Don't even prepare
|
|
|
|
* clocks and PM domains for these to match the traditional behaviour.
|
|
|
|
*/
|
|
|
|
if (unlikely(drv->probe == platform_probe_fail))
|
|
|
|
return -ENXIO;
|
|
|
|
|
|
|
|
ret = of_clk_set_defaults(_dev->of_node, false);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = dev_pm_domain_attach(_dev, true);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (drv->probe) {
|
|
|
|
ret = drv->probe(dev);
|
|
|
|
if (ret)
|
|
|
|
dev_pm_domain_detach(_dev, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
|
|
|
|
dev_warn(_dev, "probe deferral not supported\n");
|
|
|
|
ret = -ENXIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-07-13 19:35:22 +00:00
|
|
|
static void platform_remove(struct device *_dev)
|
2020-11-19 12:46:11 +00:00
|
|
|
{
|
|
|
|
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
|
|
|
struct platform_device *dev = to_platform_device(_dev);
|
|
|
|
|
2022-12-09 15:09:14 +00:00
|
|
|
if (drv->remove_new) {
|
|
|
|
drv->remove_new(dev);
|
|
|
|
} else if (drv->remove) {
|
2021-02-07 21:15:37 +00:00
|
|
|
int ret = drv->remove(dev);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
|
|
|
|
}
|
2020-11-19 12:46:11 +00:00
|
|
|
dev_pm_domain_detach(_dev, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void platform_shutdown(struct device *_dev)
|
|
|
|
{
|
|
|
|
struct platform_device *dev = to_platform_device(_dev);
|
2020-12-12 23:55:33 +00:00
|
|
|
struct platform_driver *drv;
|
|
|
|
|
|
|
|
if (!_dev->driver)
|
|
|
|
return;
|
2020-11-19 12:46:11 +00:00
|
|
|
|
2020-12-12 23:55:33 +00:00
|
|
|
drv = to_platform_driver(_dev->driver);
|
2020-11-19 12:46:11 +00:00
|
|
|
if (drv->shutdown)
|
|
|
|
drv->shutdown(dev);
|
|
|
|
}
|
|
|
|
|
2022-04-18 00:49:52 +00:00
|
|
|
static int platform_dma_configure(struct device *dev)
|
2018-04-28 02:51:58 +00:00
|
|
|
{
|
bus: platform,amba,fsl-mc,PCI: Add device DMA ownership management
The devices on platform/amba/fsl-mc/PCI buses could be bound to drivers
with the device DMA managed by kernel drivers or user-space applications.
Unfortunately, multiple devices may be placed in the same IOMMU group
because they cannot be isolated from each other. The DMA on these devices
must either be entirely under kernel control or userspace control, never
a mixture. Otherwise the driver integrity is not guaranteed because they
could access each other through the peer-to-peer accesses which by-pass
the IOMMU protection.
This checks and sets the default DMA mode during driver binding, and
cleanups during driver unbinding. In the default mode, the device DMA is
managed by the device driver which handles DMA operations through the
kernel DMA APIs (see Documentation/core-api/dma-api.rst).
For cases where the devices are assigned for userspace control through the
userspace driver framework(i.e. VFIO), the drivers(for example, vfio_pci/
vfio_platfrom etc.) may set a new flag (driver_managed_dma) to skip this
default setting in the assumption that the drivers know what they are
doing with the device DMA.
Calling iommu_device_use_default_domain() before {of,acpi}_dma_configure
is currently a problem. As things stand, the IOMMU driver ignored the
initial iommu_probe_device() call when the device was added, since at
that point it had no fwspec yet. In this situation,
{of,acpi}_iommu_configure() are retriggering iommu_probe_device() after
the IOMMU driver has seen the firmware data via .of_xlate to learn that
it actually responsible for the given device. As the result, before
that gets fixed, iommu_use_default_domain() goes at the end, and calls
arch_teardown_dma_ops() if it fails.
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Stuart Yoder <stuyoder@gmail.com>
Cc: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20220418005000.897664-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2022-04-18 00:49:53 +00:00
|
|
|
struct platform_driver *drv = to_platform_driver(dev->driver);
|
2023-10-03 14:21:22 +00:00
|
|
|
struct fwnode_handle *fwnode = dev_fwnode(dev);
|
2018-04-28 02:51:58 +00:00
|
|
|
enum dev_dma_attr attr;
|
|
|
|
int ret = 0;
|
|
|
|
|
2023-10-03 14:21:22 +00:00
|
|
|
if (is_of_node(fwnode)) {
|
|
|
|
ret = of_dma_configure(dev, to_of_node(fwnode), true);
|
|
|
|
} else if (is_acpi_device_node(fwnode)) {
|
|
|
|
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
|
2018-12-06 21:20:49 +00:00
|
|
|
ret = acpi_dma_configure(dev, attr);
|
2018-04-28 02:51:58 +00:00
|
|
|
}
|
2023-10-03 14:21:20 +00:00
|
|
|
if (ret || drv->driver_managed_dma)
|
|
|
|
return ret;
|
2018-04-28 02:51:58 +00:00
|
|
|
|
2023-10-03 14:21:20 +00:00
|
|
|
ret = iommu_device_use_default_domain(dev);
|
|
|
|
if (ret)
|
|
|
|
arch_teardown_dma_ops(dev);
|
bus: platform,amba,fsl-mc,PCI: Add device DMA ownership management
The devices on platform/amba/fsl-mc/PCI buses could be bound to drivers
with the device DMA managed by kernel drivers or user-space applications.
Unfortunately, multiple devices may be placed in the same IOMMU group
because they cannot be isolated from each other. The DMA on these devices
must either be entirely under kernel control or userspace control, never
a mixture. Otherwise the driver integrity is not guaranteed because they
could access each other through the peer-to-peer accesses which by-pass
the IOMMU protection.
This checks and sets the default DMA mode during driver binding, and
cleanups during driver unbinding. In the default mode, the device DMA is
managed by the device driver which handles DMA operations through the
kernel DMA APIs (see Documentation/core-api/dma-api.rst).
For cases where the devices are assigned for userspace control through the
userspace driver framework(i.e. VFIO), the drivers(for example, vfio_pci/
vfio_platfrom etc.) may set a new flag (driver_managed_dma) to skip this
default setting in the assumption that the drivers know what they are
doing with the device DMA.
Calling iommu_device_use_default_domain() before {of,acpi}_dma_configure
is currently a problem. As things stand, the IOMMU driver ignored the
initial iommu_probe_device() call when the device was added, since at
that point it had no fwspec yet. In this situation,
{of,acpi}_iommu_configure() are retriggering iommu_probe_device() after
the IOMMU driver has seen the firmware data via .of_xlate to learn that
it actually responsible for the given device. As the result, before
that gets fixed, iommu_use_default_domain() goes at the end, and calls
arch_teardown_dma_ops() if it fails.
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Stuart Yoder <stuyoder@gmail.com>
Cc: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20220418005000.897664-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2022-04-18 00:49:53 +00:00
|
|
|
|
2018-04-28 02:51:58 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
bus: platform,amba,fsl-mc,PCI: Add device DMA ownership management
The devices on platform/amba/fsl-mc/PCI buses could be bound to drivers
with the device DMA managed by kernel drivers or user-space applications.
Unfortunately, multiple devices may be placed in the same IOMMU group
because they cannot be isolated from each other. The DMA on these devices
must either be entirely under kernel control or userspace control, never
a mixture. Otherwise the driver integrity is not guaranteed because they
could access each other through the peer-to-peer accesses which by-pass
the IOMMU protection.
This checks and sets the default DMA mode during driver binding, and
cleanups during driver unbinding. In the default mode, the device DMA is
managed by the device driver which handles DMA operations through the
kernel DMA APIs (see Documentation/core-api/dma-api.rst).
For cases where the devices are assigned for userspace control through the
userspace driver framework(i.e. VFIO), the drivers(for example, vfio_pci/
vfio_platfrom etc.) may set a new flag (driver_managed_dma) to skip this
default setting in the assumption that the drivers know what they are
doing with the device DMA.
Calling iommu_device_use_default_domain() before {of,acpi}_dma_configure
is currently a problem. As things stand, the IOMMU driver ignored the
initial iommu_probe_device() call when the device was added, since at
that point it had no fwspec yet. In this situation,
{of,acpi}_iommu_configure() are retriggering iommu_probe_device() after
the IOMMU driver has seen the firmware data via .of_xlate to learn that
it actually responsible for the given device. As the result, before
that gets fixed, iommu_use_default_domain() goes at the end, and calls
arch_teardown_dma_ops() if it fails.
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Stuart Yoder <stuyoder@gmail.com>
Cc: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20220418005000.897664-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2022-04-18 00:49:53 +00:00
|
|
|
static void platform_dma_cleanup(struct device *dev)
|
|
|
|
{
|
|
|
|
struct platform_driver *drv = to_platform_driver(dev->driver);
|
|
|
|
|
|
|
|
if (!drv->driver_managed_dma)
|
|
|
|
iommu_device_unuse_default_domain(dev);
|
|
|
|
}
|
|
|
|
|
2009-07-21 22:37:25 +00:00
|
|
|
/*
 * Default power-management operations for all platform devices:
 * generic runtime suspend/resume, plus the platform bus sleep ops
 * (which are compiled out when CONFIG_PM_SLEEP is not set).
 */
static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * The platform "pseudo" bus.  Devices on it are matched to drivers by
 * platform_match(), probed/removed/shut down through the platform_*
 * wrappers, and get their DMA configuration set up (and torn down on
 * unbind) via the dma_configure/dma_cleanup hooks.
 */
struct bus_type platform_bus_type = {
	.name = "platform",
	.dev_groups = platform_dev_groups,
	.match = platform_match,
	.uevent = platform_uevent,
	.probe = platform_probe,
	.remove = platform_remove,
	.shutdown = platform_shutdown,
	.dma_configure = platform_dma_configure,
	.dma_cleanup = platform_dma_cleanup,
	.pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-11-12 21:41:56 +00:00
|
|
|
/*
 * Adapter so platform_match() can serve as a bus_find_device()
 * predicate, whose data argument arrives as a const void pointer.
 */
static inline int __platform_match(struct device *dev, const void *drv)
{
	struct device_driver *driver = (struct device_driver *)drv;

	return platform_match(dev, driver);
}
|
|
|
|
|
2019-07-23 22:18:38 +00:00
|
|
|
/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 *
 * Returns the first matching device on the platform bus, or NULL if
 * none is found.  A reference is taken on the returned device; the
 * caller is responsible for dropping it with put_device().
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
|
|
|
|
|
2019-12-03 20:58:52 +00:00
|
|
|
void __weak __init early_platform_cleanup(void) { }
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * platform_bus_init - register the platform bus root device and bus type.
 *
 * Called once at boot.  Registers the "platform" root device first, then
 * the bus type; if the bus type fails to register, the root device is
 * unwound again.  Returns 0 on success or a negative errno.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		/*
		 * device_register() leaves a reference held even on
		 * failure; it must be dropped with put_device(), never
		 * by freeing the device directly.
		 */
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		/* Unwind the root device if the bus type failed to register. */
		device_unregister(&platform_bus);

	return error;
}
|