Merge tag 'spi-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown: "This is quite a quiet release for SPI, there's been a bit of cleanup to the core from Uwe but nothing functionality wise. We have added several new drivers, Cadence XSPI, Ingenic JZ47xx, Qualcomm SC7280 and SC7180 and Xilinx Versal OSPI" * tag 'spi-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (41 commits) spi: Convert NXP flexspi to json schema spi: spi-geni-qcom: Add support for GPI dma spi: fsi: Fix contention in the FSI2SPI engine spi: spi-rpc-if: Check return value of rpcif_sw_init() spi: tegra210-quad: Put device into suspend on driver removal spi: tegra20-slink: Put device into suspend on driver removal spi: bcm-qspi: Fix missing clk_disable_unprepare() on error in bcm_qspi_probe() spi: at91-usart: replacing legacy gpio interface for gpiod spi: replace snprintf in show functions with sysfs_emit spi: cadence: Add of_node_put() before return spi: orion: Add of_node_put() before goto spi: cadence-quadspi: fix dma_unmap_single() call spi: tegra20: fix build with CONFIG_PM_SLEEP=n spi: bcm-qspi: add support for 3-wire mode for half duplex transfer spi: bcm-qspi: Add mspi spcr3 32/64-bits xfer mode spi: Make several public functions private to spi.c spi: Reorder functions to simplify the next commit spi: Remove unused function spi_busnum_to_master() spi: Move comment about chipselect check to the right place spi: fsi: Print status on error ...
This commit is contained in:
@@ -11,6 +11,14 @@ maintainers:
|
||||
|
||||
allOf:
|
||||
- $ref: spi-controller.yaml#
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: xlnx,versal-ospi-1.0
|
||||
then:
|
||||
required:
|
||||
- power-domains
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
@@ -20,6 +28,7 @@ properties:
|
||||
- ti,k2g-qspi
|
||||
- ti,am654-ospi
|
||||
- intel,lgm-qspi
|
||||
- xlnx,versal-ospi-1.0
|
||||
- const: cdns,qspi-nor
|
||||
- const: cdns,qspi-nor
|
||||
|
||||
@@ -65,6 +74,9 @@ properties:
|
||||
data rather than the QSPI clock. Make sure that QSPI return clock
|
||||
is populated on the board before using this property.
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
resets:
|
||||
maxItems: 2
|
||||
|
||||
|
||||
77
Documentation/devicetree/bindings/spi/cdns,xspi.yaml
Normal file
77
Documentation/devicetree/bindings/spi/cdns,xspi.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
# Copyright 2020-21 Cadence
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: "http://devicetree.org/schemas/spi/cdns,xspi.yaml#"
|
||||
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
|
||||
|
||||
title: Cadence XSPI Controller
|
||||
|
||||
maintainers:
|
||||
- Parshuram Thombare <pthombar@cadence.com>
|
||||
|
||||
description: |
|
||||
The XSPI controller allows SPI protocol communication in
|
||||
single, dual, quad or octal wire transmission modes for
|
||||
read/write access to slaves such as SPI-NOR flash.
|
||||
|
||||
allOf:
|
||||
- $ref: "spi-controller.yaml#"
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: cdns,xspi-nor
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: address and length of the controller register set
|
||||
- description: address and length of the Slave DMA data port
|
||||
- description: address and length of the auxiliary registers
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: io
|
||||
- const: sdma
|
||||
- const: aux
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
bus {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
xspi: spi@a0010000 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
compatible = "cdns,xspi-nor";
|
||||
reg = <0x0 0xa0010000 0x0 0x1040>,
|
||||
<0x0 0xb0000000 0x0 0x1000>,
|
||||
<0x0 0xa0020000 0x0 0x100>;
|
||||
reg-names = "io", "sdma", "aux";
|
||||
interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&gic>;
|
||||
|
||||
flash@0 {
|
||||
compatible = "jedec,spi-nor";
|
||||
spi-max-frequency = <75000000>;
|
||||
reg = <0>;
|
||||
};
|
||||
|
||||
flash@1 {
|
||||
compatible = "jedec,spi-nor";
|
||||
spi-max-frequency = <75000000>;
|
||||
reg = <1>;
|
||||
};
|
||||
};
|
||||
};
|
||||
72
Documentation/devicetree/bindings/spi/ingenic,spi.yaml
Normal file
72
Documentation/devicetree/bindings/spi/ingenic,spi.yaml
Normal file
@@ -0,0 +1,72 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/spi/ingenic,spi.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Ingenic SoCs SPI controller devicetree bindings
|
||||
|
||||
maintainers:
|
||||
- Artur Rojek <contact@artur-rojek.eu>
|
||||
- Paul Cercueil <paul@crapouillou.net>
|
||||
|
||||
allOf:
|
||||
- $ref: /schemas/spi/spi-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
oneOf:
|
||||
- enum:
|
||||
- ingenic,jz4750-spi
|
||||
- ingenic,jz4780-spi
|
||||
- items:
|
||||
- enum:
|
||||
- ingenic,jz4760-spi
|
||||
- ingenic,jz4770-spi
|
||||
- const: ingenic,jz4750-spi
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
dmas:
|
||||
maxItems: 2
|
||||
minItems: 2
|
||||
|
||||
dma-names:
|
||||
items:
|
||||
- const: rx
|
||||
- const: tx
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
- clocks
|
||||
- dmas
|
||||
- dma-names
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/jz4770-cgu.h>
|
||||
spi@10043000 {
|
||||
compatible = "ingenic,jz4770-spi", "ingenic,jz4750-spi";
|
||||
reg = <0x10043000 0x1c>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
interrupt-parent = <&intc>;
|
||||
interrupts = <8>;
|
||||
|
||||
clocks = <&cgu JZ4770_CLK_SSI0>;
|
||||
|
||||
dmas = <&dmac1 23 0xffffffff>, <&dmac1 22 0xffffffff>;
|
||||
dma-names = "rx", "tx";
|
||||
};
|
||||
@@ -21,7 +21,11 @@ allOf:
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: qcom,sdm845-qspi
|
||||
- enum:
|
||||
- qcom,sc7180-qspi
|
||||
- qcom,sc7280-qspi
|
||||
- qcom,sdm845-qspi
|
||||
|
||||
- const: qcom,qspi-v1
|
||||
|
||||
reg:
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
* NXP Flex Serial Peripheral Interface (FSPI)
|
||||
|
||||
Required properties:
|
||||
- compatible : Should be "nxp,lx2160a-fspi"
|
||||
"nxp,imx8qxp-fspi"
|
||||
"nxp,imx8mm-fspi"
|
||||
"nxp,imx8mp-fspi"
|
||||
"nxp,imx8dxl-fspi"
|
||||
|
||||
- reg : First contains the register location and length,
|
||||
Second contains the memory mapping address and length
|
||||
- reg-names : Should contain the resource reg names:
|
||||
- fspi_base: configuration register address space
|
||||
- fspi_mmap: memory mapped address space
|
||||
- interrupts : Should contain the interrupt for the device
|
||||
|
||||
Required SPI slave node properties:
|
||||
- reg : There are two buses (A and B) with two chip selects each.
|
||||
This encodes to which bus and CS the flash is connected:
|
||||
- <0>: Bus A, CS 0
|
||||
- <1>: Bus A, CS 1
|
||||
- <2>: Bus B, CS 0
|
||||
- <3>: Bus B, CS 1
|
||||
|
||||
Example showing the usage of two SPI NOR slave devices on bus A:
|
||||
|
||||
fspi0: spi@20c0000 {
|
||||
compatible = "nxp,lx2160a-fspi";
|
||||
reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>;
|
||||
reg-names = "fspi_base", "fspi_mmap";
|
||||
interrupts = <0 25 0x4>; /* Level high type */
|
||||
clocks = <&clockgen 4 3>, <&clockgen 4 3>;
|
||||
clock-names = "fspi_en", "fspi";
|
||||
|
||||
mt35xu512aba0: flash@0 {
|
||||
reg = <0>;
|
||||
....
|
||||
};
|
||||
|
||||
mt35xu512aba1: flash@1 {
|
||||
reg = <1>;
|
||||
....
|
||||
};
|
||||
};
|
||||
86
Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
Normal file
86
Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
Normal file
@@ -0,0 +1,86 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/spi/spi-nxp-fspi.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: NXP Flex Serial Peripheral Interface (FSPI)
|
||||
|
||||
maintainers:
|
||||
- Kuldeep Singh <kuldeep.singh@nxp.com>
|
||||
|
||||
allOf:
|
||||
- $ref: "spi-controller.yaml#"
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- nxp,imx8dxl-fspi
|
||||
- nxp,imx8mm-fspi
|
||||
- nxp,imx8mp-fspi
|
||||
- nxp,imx8qxp-fspi
|
||||
- nxp,lx2160a-fspi
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: registers address space
|
||||
- description: memory mapped address space
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: fspi_base
|
||||
- const: fspi_mmap
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: SPI bus clock
|
||||
- description: SPI serial clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: fspi_en
|
||||
- const: fspi
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reg-names
|
||||
- interrupts
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
|
||||
|
||||
soc {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
spi@20c0000 {
|
||||
compatible = "nxp,lx2160a-fspi";
|
||||
reg = <0x0 0x20c0000 0x0 0x100000>,
|
||||
<0x0 0x20000000 0x0 0x10000000>;
|
||||
reg-names = "fspi_base", "fspi_mmap";
|
||||
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(4)>,
|
||||
<&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(4)>;
|
||||
clock-names = "fspi_en", "fspi";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
flash@0 {
|
||||
compatible = "jedec,spi-nor";
|
||||
spi-max-frequency = <50000000>;
|
||||
reg = <0>;
|
||||
spi-rx-bus-width = <8>;
|
||||
spi-tx-bus-width = <8>;
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -336,14 +336,6 @@ certainly includes SPI devices hooked up through the card connectors!
|
||||
Non-static Configurations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Developer boards often play by different rules than product boards, and one
|
||||
example is the potential need to hotplug SPI devices and/or controllers.
|
||||
|
||||
For those cases you might need to use spi_busnum_to_master() to look
|
||||
up the spi bus master, and will likely need spi_new_device() to provide the
|
||||
board info based on the board that was hotplugged. Of course, you'd later
|
||||
call at least spi_unregister_device() when that board is removed.
|
||||
|
||||
When Linux includes support for MMC/SD/SDIO/DataFlash cards through SPI, those
|
||||
configurations will also be dynamic. Fortunately, such devices all support
|
||||
basic device identification probes, so they should hotplug normally.
|
||||
|
||||
@@ -13488,7 +13488,7 @@ M: Ashish Kumar <ashish.kumar@nxp.com>
|
||||
R: Yogesh Gaur <yogeshgaur.83@gmail.com>
|
||||
L: linux-spi@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
|
||||
F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
|
||||
F: drivers/spi/spi-nxp-fspi.c
|
||||
|
||||
NXP FXAS21002C DRIVER
|
||||
|
||||
@@ -113,9 +113,12 @@
|
||||
* Use the 32.768 kHz oscillator as the parent of the RTC for a higher
|
||||
* precision.
|
||||
*/
|
||||
assigned-clocks = <&cgu JZ4780_CLK_OTGPHY>, <&cgu JZ4780_CLK_RTC>;
|
||||
assigned-clock-parents = <0>, <&cgu JZ4780_CLK_RTCLK>;
|
||||
assigned-clock-rates = <48000000>;
|
||||
assigned-clocks = <&cgu JZ4780_CLK_OTGPHY>, <&cgu JZ4780_CLK_RTC>,
|
||||
<&cgu JZ4780_CLK_SSIPLL>, <&cgu JZ4780_CLK_SSI>;
|
||||
assigned-clock-parents = <0>, <&cgu JZ4780_CLK_RTCLK>,
|
||||
<&cgu JZ4780_CLK_MPLL>,
|
||||
<&cgu JZ4780_CLK_SSIPLL>;
|
||||
assigned-clock-rates = <48000000>, <0>, <54000000>;
|
||||
};
|
||||
|
||||
&tcu {
|
||||
|
||||
@@ -255,22 +255,23 @@
|
||||
};
|
||||
};
|
||||
|
||||
spi_gpio {
|
||||
compatible = "spi-gpio";
|
||||
spi0: spi@10043000 {
|
||||
compatible = "ingenic,jz4780-spi";
|
||||
reg = <0x10043000 0x1c>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
num-chipselects = <2>;
|
||||
|
||||
gpio-miso = <&gpe 14 0>;
|
||||
gpio-sck = <&gpe 15 0>;
|
||||
gpio-mosi = <&gpe 17 0>;
|
||||
cs-gpios = <&gpe 16 0>, <&gpe 18 0>;
|
||||
interrupt-parent = <&intc>;
|
||||
interrupts = <8>;
|
||||
|
||||
spidev@0 {
|
||||
compatible = "spidev";
|
||||
reg = <0>;
|
||||
spi-max-frequency = <1000000>;
|
||||
};
|
||||
clocks = <&cgu JZ4780_CLK_SSI0>;
|
||||
clock-names = "spi";
|
||||
|
||||
dmas = <&dma JZ4780_DMA_SSI0_RX 0xffffffff>,
|
||||
<&dma JZ4780_DMA_SSI0_TX 0xffffffff>;
|
||||
dma-names = "rx", "tx";
|
||||
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
uart0: serial@10030000 {
|
||||
@@ -338,6 +339,25 @@
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
spi1: spi@10044000 {
|
||||
compatible = "ingenic,jz4780-spi";
|
||||
reg = <0x10044000 0x1c>;
|
||||
#address-cells = <1>;
|
||||
#size-sells = <0>;
|
||||
|
||||
interrupt-parent = <&intc>;
|
||||
interrupts = <7>;
|
||||
|
||||
clocks = <&cgu JZ4780_CLK_SSI1>;
|
||||
clock-names = "spi";
|
||||
|
||||
dmas = <&dma JZ4780_DMA_SSI1_RX 0xffffffff>,
|
||||
<&dma JZ4780_DMA_SSI1_TX 0xffffffff>;
|
||||
dma-names = "rx", "tx";
|
||||
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
i2c0: i2c@10050000 {
|
||||
compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
|
||||
#address-cells = <1>;
|
||||
|
||||
@@ -647,6 +647,23 @@ int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
|
||||
|
||||
/**
|
||||
* zynqmp_pm_ospi_mux_select() - OSPI Mux selection
|
||||
*
|
||||
* @dev_id: Device Id of the OSPI device.
|
||||
* @select: OSPI Mux select value.
|
||||
*
|
||||
* This function select the OSPI Mux.
|
||||
*
|
||||
* Return: Returns status, either success or error+reason
|
||||
*/
|
||||
int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
|
||||
{
|
||||
return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
|
||||
select, 0, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
|
||||
|
||||
/**
|
||||
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
|
||||
* @index: GGS register index
|
||||
|
||||
@@ -228,6 +228,18 @@ config SPI_CADENCE_QUADSPI
|
||||
device with a Cadence QSPI controller and want to access the
|
||||
Flash as an MTD device.
|
||||
|
||||
config SPI_CADENCE_XSPI
|
||||
tristate "Cadence XSPI controller"
|
||||
depends on (OF || COMPILE_TEST) && HAS_IOMEM
|
||||
depends on SPI_MEM
|
||||
help
|
||||
Enable support for the Cadence XSPI Flash controller.
|
||||
|
||||
Cadence XSPI is a specialized controller for connecting an SPI
|
||||
Flash over upto 8bit wide bus. Enable this option if you have a
|
||||
device with a Cadence XSPI controller and want to access the
|
||||
Flash as an MTD device.
|
||||
|
||||
config SPI_CLPS711X
|
||||
tristate "CLPS711X host SPI controller"
|
||||
depends on ARCH_CLPS711X || COMPILE_TEST
|
||||
@@ -406,6 +418,15 @@ config SPI_IMX
|
||||
help
|
||||
This enables support for the Freescale i.MX SPI controllers.
|
||||
|
||||
config SPI_INGENIC
|
||||
tristate "Ingenic JZ47xx SoCs SPI controller"
|
||||
depends on MACH_INGENIC || COMPILE_TEST
|
||||
help
|
||||
This enables support for the Ingenic JZ47xx SoCs SPI controller.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called spi-ingenic.
|
||||
|
||||
config SPI_JCORE
|
||||
tristate "J-Core SPI Master"
|
||||
depends on OF && (SUPERH || COMPILE_TEST)
|
||||
@@ -738,10 +759,11 @@ config SPI_S3C24XX_FIQ
|
||||
TX and RX data paths.
|
||||
|
||||
config SPI_S3C64XX
|
||||
tristate "Samsung S3C64XX series type SPI"
|
||||
tristate "Samsung S3C64XX/Exynos SoC series type SPI"
|
||||
depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST)
|
||||
help
|
||||
SPI driver for Samsung S3C64XX and newer SoCs.
|
||||
SPI driver for Samsung S3C64XX, S5Pv210 and Exynos SoCs.
|
||||
Choose Y/M here only if you build for such Samsung SoC.
|
||||
|
||||
config SPI_SC18IS602
|
||||
tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
|
||||
|
||||
@@ -34,6 +34,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
|
||||
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
|
||||
obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
|
||||
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
|
||||
obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
|
||||
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
|
||||
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
|
||||
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
|
||||
@@ -59,6 +60,7 @@ obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o
|
||||
obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
|
||||
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
|
||||
obj-$(CONFIG_SPI_IMX) += spi-imx.o
|
||||
obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o
|
||||
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
|
||||
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
|
||||
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
|
||||
|
||||
@@ -310,7 +310,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
|
||||
return mode;
|
||||
ifr |= atmel_qspi_modes[mode].config;
|
||||
|
||||
if (op->dummy.buswidth && op->dummy.nbytes)
|
||||
if (op->dummy.nbytes)
|
||||
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
|
||||
|
||||
/*
|
||||
|
||||
@@ -38,126 +38,102 @@ struct amd_spi {
|
||||
void __iomem *io_remap_addr;
|
||||
unsigned long io_base_addr;
|
||||
u32 rom_addr;
|
||||
u8 chip_select;
|
||||
};
|
||||
|
||||
static inline u8 amd_spi_readreg8(struct spi_master *master, int idx)
|
||||
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
|
||||
return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx);
|
||||
}
|
||||
|
||||
static inline void amd_spi_writereg8(struct spi_master *master, int idx,
|
||||
u8 val)
|
||||
static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
|
||||
iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
|
||||
}
|
||||
|
||||
static inline void amd_spi_setclear_reg8(struct spi_master *master, int idx,
|
||||
u8 set, u8 clear)
|
||||
static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear)
|
||||
{
|
||||
u8 tmp = amd_spi_readreg8(master, idx);
|
||||
u8 tmp = amd_spi_readreg8(amd_spi, idx);
|
||||
|
||||
tmp = (tmp & ~clear) | set;
|
||||
amd_spi_writereg8(master, idx, tmp);
|
||||
amd_spi_writereg8(amd_spi, idx, tmp);
|
||||
}
|
||||
|
||||
static inline u32 amd_spi_readreg32(struct spi_master *master, int idx)
|
||||
static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
|
||||
return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx);
|
||||
}
|
||||
|
||||
static inline void amd_spi_writereg32(struct spi_master *master, int idx,
|
||||
u32 val)
|
||||
static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
|
||||
iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
|
||||
}
|
||||
|
||||
static inline void amd_spi_setclear_reg32(struct spi_master *master, int idx,
|
||||
u32 set, u32 clear)
|
||||
static inline void amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear)
|
||||
{
|
||||
u32 tmp = amd_spi_readreg32(master, idx);
|
||||
u32 tmp = amd_spi_readreg32(amd_spi, idx);
|
||||
|
||||
tmp = (tmp & ~clear) | set;
|
||||
amd_spi_writereg32(master, idx, tmp);
|
||||
amd_spi_writereg32(amd_spi, idx, tmp);
|
||||
}
|
||||
|
||||
static void amd_spi_select_chip(struct spi_master *master)
|
||||
static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
u8 chip_select = amd_spi->chip_select;
|
||||
|
||||
amd_spi_setclear_reg8(master, AMD_SPI_ALT_CS_REG, chip_select,
|
||||
AMD_SPI_ALT_CS_MASK);
|
||||
amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
|
||||
}
|
||||
|
||||
static void amd_spi_clear_fifo_ptr(struct spi_master *master)
|
||||
static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
|
||||
{
|
||||
amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR,
|
||||
AMD_SPI_FIFO_CLEAR);
|
||||
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
|
||||
}
|
||||
|
||||
static void amd_spi_set_opcode(struct spi_master *master, u8 cmd_opcode)
|
||||
static void amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
|
||||
{
|
||||
amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, cmd_opcode,
|
||||
AMD_SPI_OPCODE_MASK);
|
||||
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode, AMD_SPI_OPCODE_MASK);
|
||||
}
|
||||
|
||||
static inline void amd_spi_set_rx_count(struct spi_master *master,
|
||||
u8 rx_count)
|
||||
static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
|
||||
{
|
||||
amd_spi_setclear_reg8(master, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
|
||||
amd_spi_setclear_reg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
|
||||
}
|
||||
|
||||
static inline void amd_spi_set_tx_count(struct spi_master *master,
|
||||
u8 tx_count)
|
||||
static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
|
||||
{
|
||||
amd_spi_setclear_reg8(master, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
|
||||
amd_spi_setclear_reg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
|
||||
}
|
||||
|
||||
static inline int amd_spi_busy_wait(struct amd_spi *amd_spi)
|
||||
static int amd_spi_busy_wait(struct amd_spi *amd_spi)
|
||||
{
|
||||
bool spi_busy;
|
||||
int timeout = 100000;
|
||||
|
||||
/* poll for SPI bus to become idle */
|
||||
spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
|
||||
AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
|
||||
while (spi_busy) {
|
||||
while (amd_spi_readreg32(amd_spi, AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) {
|
||||
usleep_range(10, 20);
|
||||
if (timeout-- < 0)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
|
||||
AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amd_spi_execute_opcode(struct spi_master *master)
|
||||
static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
|
||||
{
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
int ret;
|
||||
|
||||
ret = amd_spi_busy_wait(amd_spi);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Set ExecuteOpCode bit in the CTRL0 register */
|
||||
amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
|
||||
AMD_SPI_EXEC_CMD);
|
||||
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, AMD_SPI_EXEC_CMD);
|
||||
|
||||
amd_spi_busy_wait(amd_spi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_spi_master_setup(struct spi_device *spi)
|
||||
{
|
||||
struct spi_master *master = spi->master;
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(spi->master);
|
||||
|
||||
amd_spi_clear_fifo_ptr(master);
|
||||
amd_spi_clear_fifo_ptr(amd_spi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -185,19 +161,18 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
|
||||
tx_len = xfer->len - 1;
|
||||
cmd_opcode = *(u8 *)xfer->tx_buf;
|
||||
buf++;
|
||||
amd_spi_set_opcode(master, cmd_opcode);
|
||||
amd_spi_set_opcode(amd_spi, cmd_opcode);
|
||||
|
||||
/* Write data into the FIFO. */
|
||||
for (i = 0; i < tx_len; i++) {
|
||||
iowrite8(buf[i],
|
||||
((u8 __iomem *)amd_spi->io_remap_addr +
|
||||
iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr +
|
||||
AMD_SPI_FIFO_BASE + i));
|
||||
}
|
||||
|
||||
amd_spi_set_tx_count(master, tx_len);
|
||||
amd_spi_clear_fifo_ptr(master);
|
||||
amd_spi_set_tx_count(amd_spi, tx_len);
|
||||
amd_spi_clear_fifo_ptr(amd_spi);
|
||||
/* Execute command */
|
||||
amd_spi_execute_opcode(master);
|
||||
amd_spi_execute_opcode(amd_spi);
|
||||
}
|
||||
if (m_cmd & AMD_SPI_XFER_RX) {
|
||||
/*
|
||||
@@ -206,15 +181,14 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
|
||||
*/
|
||||
rx_len = xfer->len;
|
||||
buf = (u8 *)xfer->rx_buf;
|
||||
amd_spi_set_rx_count(master, rx_len);
|
||||
amd_spi_clear_fifo_ptr(master);
|
||||
amd_spi_set_rx_count(amd_spi, rx_len);
|
||||
amd_spi_clear_fifo_ptr(amd_spi);
|
||||
/* Execute command */
|
||||
amd_spi_execute_opcode(master);
|
||||
amd_spi_execute_opcode(amd_spi);
|
||||
amd_spi_busy_wait(amd_spi);
|
||||
/* Read data from FIFO to receive buffer */
|
||||
for (i = 0; i < rx_len; i++)
|
||||
buf[i] = amd_spi_readreg8(master,
|
||||
AMD_SPI_FIFO_BASE +
|
||||
tx_len + i);
|
||||
buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -233,8 +207,7 @@ static int amd_spi_master_transfer(struct spi_master *master,
|
||||
struct amd_spi *amd_spi = spi_master_get_devdata(master);
|
||||
struct spi_device *spi = msg->spi;
|
||||
|
||||
amd_spi->chip_select = spi->chip_select;
|
||||
amd_spi_select_chip(master);
|
||||
amd_spi_select_chip(amd_spi, spi->chip_select);
|
||||
|
||||
/*
|
||||
* Extract spi_transfers from the spi message and
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/pinctrl/consumer.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
@@ -482,29 +482,12 @@ static void at91_usart_spi_init(struct at91_usart_spi *aus)
|
||||
|
||||
static int at91_usart_gpio_setup(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *np = pdev->dev.parent->of_node;
|
||||
int i;
|
||||
int ret;
|
||||
int nb;
|
||||
struct gpio_descs *cs_gpios;
|
||||
|
||||
if (!np)
|
||||
return -EINVAL;
|
||||
cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
|
||||
|
||||
nb = of_gpio_named_count(np, "cs-gpios");
|
||||
for (i = 0; i < nb; i++) {
|
||||
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
|
||||
|
||||
if (cs_gpio < 0)
|
||||
return cs_gpio;
|
||||
|
||||
if (gpio_is_valid(cs_gpio)) {
|
||||
ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
|
||||
GPIOF_DIR_OUT,
|
||||
dev_name(&pdev->dev));
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (IS_ERR(cs_gpios))
|
||||
return PTR_ERR(cs_gpios);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -83,6 +83,9 @@
|
||||
/* MSPI register offsets */
|
||||
#define MSPI_SPCR0_LSB 0x000
|
||||
#define MSPI_SPCR0_MSB 0x004
|
||||
#define MSPI_SPCR0_MSB_CPHA BIT(0)
|
||||
#define MSPI_SPCR0_MSB_CPOL BIT(1)
|
||||
#define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
|
||||
#define MSPI_SPCR1_LSB 0x008
|
||||
#define MSPI_SPCR1_MSB 0x00c
|
||||
#define MSPI_NEWQP 0x010
|
||||
@@ -100,8 +103,10 @@
|
||||
#define MSPI_MASTER_BIT BIT(7)
|
||||
|
||||
#define MSPI_NUM_CDRAM 16
|
||||
#define MSPI_CDRAM_OUTP BIT(8)
|
||||
#define MSPI_CDRAM_CONT_BIT BIT(7)
|
||||
#define MSPI_CDRAM_BITSE_BIT BIT(6)
|
||||
#define MSPI_CDRAM_DT_BIT BIT(5)
|
||||
#define MSPI_CDRAM_PCS 0xf
|
||||
|
||||
#define MSPI_SPCR2_SPE BIT(6)
|
||||
@@ -114,6 +119,14 @@
|
||||
~(BIT(10) | BIT(11)))
|
||||
#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
|
||||
BIT(11))
|
||||
#define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
|
||||
#define MSPI_SPCR3_DAM_8BYTE 0
|
||||
#define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
|
||||
#define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
|
||||
#define MSPI_SPCR3_HALFDUPLEX BIT(6)
|
||||
#define MSPI_SPCR3_HDOUTTYPE BIT(7)
|
||||
#define MSPI_SPCR3_DATA_REG_SZ BIT(8)
|
||||
#define MSPI_SPCR3_CPHARX BIT(9)
|
||||
|
||||
#define MSPI_MSPI_STATUS_SPIF BIT(0)
|
||||
|
||||
@@ -153,6 +166,14 @@
|
||||
#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
|
||||
TRANS_STATUS_BREAK_CS_CHANGE)
|
||||
|
||||
/*
|
||||
* Used for writing and reading data in the right order
|
||||
* to TXRAM and RXRAM when used as 32-bit registers respectively
|
||||
*/
|
||||
#define swap4bytes(__val) \
|
||||
((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
|
||||
(((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
|
||||
|
||||
struct bcm_qspi_parms {
|
||||
u32 speed_hz;
|
||||
u8 mode;
|
||||
@@ -261,7 +282,7 @@ static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
|
||||
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
|
||||
{
|
||||
if (bcm_qspi_has_fastbr(qspi))
|
||||
return 1;
|
||||
return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
|
||||
else
|
||||
return 8;
|
||||
}
|
||||
@@ -395,7 +416,8 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
|
||||
if (addrlen == BSPI_ADDRLEN_4BYTES)
|
||||
bpp = BSPI_BPP_ADDR_SELECT_MASK;
|
||||
|
||||
bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
|
||||
if (op->dummy.nbytes)
|
||||
bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
|
||||
|
||||
switch (width) {
|
||||
case SPI_NBITS_SINGLE:
|
||||
@@ -570,23 +592,23 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
|
||||
{
|
||||
u32 spcr, spbr = 0;
|
||||
|
||||
if (xp->speed_hz)
|
||||
spbr = qspi->base_clk / (2 * xp->speed_hz);
|
||||
|
||||
spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
|
||||
|
||||
if (!qspi->mspi_maj_rev)
|
||||
/* legacy controller */
|
||||
spcr = MSPI_MASTER_BIT;
|
||||
else
|
||||
spcr = 0;
|
||||
|
||||
/* for 16 bit the data should be zero */
|
||||
if (xp->bits_per_word != 16)
|
||||
spcr |= xp->bits_per_word << 2;
|
||||
spcr |= xp->mode & 3;
|
||||
/*
|
||||
* Bits per transfer. BITS determines the number of data bits
|
||||
* transferred if the command control bit (BITSE of a
|
||||
* CDRAM Register) is equal to 1.
|
||||
* If CDRAM BITSE is equal to 0, 8 data bits are transferred
|
||||
* regardless
|
||||
*/
|
||||
if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
|
||||
spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;
|
||||
|
||||
spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
|
||||
|
||||
if (bcm_qspi_has_fastbr(qspi)) {
|
||||
@@ -595,17 +617,44 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
|
||||
/* enable fastbr */
|
||||
spcr |= MSPI_SPCR3_FASTBR;
|
||||
|
||||
if (xp->mode & SPI_3WIRE)
|
||||
spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;
|
||||
|
||||
if (bcm_qspi_has_sysclk_108(qspi)) {
|
||||
/* SYSCLK_108 */
|
||||
spcr |= MSPI_SPCR3_SYSCLKSEL_108;
|
||||
qspi->base_clk = MSPI_BASE_FREQ * 4;
|
||||
/* Change spbr as we changed sysclk */
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4);
|
||||
}
|
||||
|
||||
if (xp->bits_per_word > 16) {
|
||||
/* data_reg_size 1 (64bit) */
|
||||
spcr |= MSPI_SPCR3_DATA_REG_SZ;
|
||||
/* TxRx RAM data access mode 2 for 32B and set fastdt */
|
||||
spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT;
|
||||
/*
|
||||
* Set length of delay after transfer
|
||||
* DTL from 0(256) to 1
|
||||
*/
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
|
||||
} else {
|
||||
/* data_reg_size[8] = 0 */
|
||||
spcr &= ~(MSPI_SPCR3_DATA_REG_SZ);
|
||||
|
||||
/*
|
||||
* TxRx RAM access mode 8B
|
||||
* and disable fastdt
|
||||
*/
|
||||
spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
|
||||
}
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
|
||||
}
|
||||
|
||||
if (xp->speed_hz)
|
||||
spbr = qspi->base_clk / (2 * xp->speed_hz);
|
||||
|
||||
spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
|
||||
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
|
||||
|
||||
qspi->last_parms = *xp;
|
||||
}
|
||||
|
||||
@@ -626,7 +675,7 @@ static int bcm_qspi_setup(struct spi_device *spi)
|
||||
{
|
||||
struct bcm_qspi_parms *xp;
|
||||
|
||||
if (spi->bits_per_word > 16)
|
||||
if (spi->bits_per_word > 64)
|
||||
return -EINVAL;
|
||||
|
||||
xp = spi_get_ctldata(spi);
|
||||
@@ -665,8 +714,12 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
|
||||
/* count the last transferred bytes */
|
||||
if (qt->trans->bits_per_word <= 8)
|
||||
qt->byte++;
|
||||
else
|
||||
else if (qt->trans->bits_per_word <= 16)
|
||||
qt->byte += 2;
|
||||
else if (qt->trans->bits_per_word <= 32)
|
||||
qt->byte += 4;
|
||||
else if (qt->trans->bits_per_word <= 64)
|
||||
qt->byte += 8;
|
||||
|
||||
if (qt->byte >= qt->trans->len) {
|
||||
/* we're at the end of the spi_transfer */
|
||||
@@ -709,6 +762,33 @@ static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
|
||||
((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
|
||||
}
|
||||
|
||||
static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
|
||||
{
|
||||
u32 reg_offset = MSPI_RXRAM;
|
||||
u32 offset = reg_offset + (slot << 3);
|
||||
u32 val;
|
||||
|
||||
val = bcm_qspi_read(qspi, MSPI, offset);
|
||||
val = swap4bytes(val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
|
||||
{
|
||||
u32 reg_offset = MSPI_RXRAM;
|
||||
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
|
||||
u32 msb_offset = reg_offset + (slot << 3);
|
||||
u32 msb, lsb;
|
||||
|
||||
msb = bcm_qspi_read(qspi, MSPI, msb_offset);
|
||||
msb = swap4bytes(msb);
|
||||
lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
|
||||
lsb = swap4bytes(lsb);
|
||||
|
||||
return ((u64)msb << 32 | lsb);
|
||||
}
|
||||
|
||||
static void read_from_hw(struct bcm_qspi *qspi, int slots)
|
||||
{
|
||||
struct qspi_trans tp;
|
||||
@@ -732,7 +812,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
|
||||
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
|
||||
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
|
||||
buf ? buf[tp.byte] : 0x0);
|
||||
} else {
|
||||
} else if (tp.trans->bits_per_word <= 16) {
|
||||
u16 *buf = tp.trans->rx_buf;
|
||||
|
||||
if (buf)
|
||||
@@ -740,6 +820,25 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
|
||||
slot);
|
||||
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
|
||||
buf ? buf[tp.byte / 2] : 0x0);
|
||||
} else if (tp.trans->bits_per_word <= 32) {
|
||||
u32 *buf = tp.trans->rx_buf;
|
||||
|
||||
if (buf)
|
||||
buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
|
||||
slot);
|
||||
dev_dbg(&qspi->pdev->dev, "RD %08x\n",
|
||||
buf ? buf[tp.byte / 4] : 0x0);
|
||||
|
||||
} else if (tp.trans->bits_per_word <= 64) {
|
||||
u64 *buf = tp.trans->rx_buf;
|
||||
|
||||
if (buf)
|
||||
buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
|
||||
slot);
|
||||
dev_dbg(&qspi->pdev->dev, "RD %llx\n",
|
||||
buf ? buf[tp.byte / 8] : 0x0);
|
||||
|
||||
|
||||
}
|
||||
|
||||
update_qspi_trans_byte_count(qspi, &tp,
|
||||
@@ -769,6 +868,28 @@ static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
|
||||
bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
|
||||
}
|
||||
|
||||
static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
|
||||
u32 val)
|
||||
{
|
||||
u32 reg_offset = MSPI_TXRAM;
|
||||
u32 msb_offset = reg_offset + (slot << 3);
|
||||
|
||||
bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
|
||||
}
|
||||
|
||||
static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
|
||||
u64 val)
|
||||
{
|
||||
u32 reg_offset = MSPI_TXRAM;
|
||||
u32 msb_offset = reg_offset + (slot << 3);
|
||||
u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
|
||||
u32 msb = upper_32_bits(val);
|
||||
u32 lsb = lower_32_bits(val);
|
||||
|
||||
bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
|
||||
bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
|
||||
}
|
||||
|
||||
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
|
||||
{
|
||||
return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
|
||||
@@ -792,20 +913,43 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
|
||||
|
||||
/* Run until end of transfer or reached the max data */
|
||||
while (!tstatus && slot < MSPI_NUM_CDRAM) {
|
||||
mspi_cdram = MSPI_CDRAM_CONT_BIT;
|
||||
if (tp.trans->bits_per_word <= 8) {
|
||||
const u8 *buf = tp.trans->tx_buf;
|
||||
u8 val = buf ? buf[tp.byte] : 0x00;
|
||||
|
||||
write_txram_slot_u8(qspi, slot, val);
|
||||
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
|
||||
} else {
|
||||
} else if (tp.trans->bits_per_word <= 16) {
|
||||
const u16 *buf = tp.trans->tx_buf;
|
||||
u16 val = buf ? buf[tp.byte / 2] : 0x0000;
|
||||
|
||||
write_txram_slot_u16(qspi, slot, val);
|
||||
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
|
||||
} else if (tp.trans->bits_per_word <= 32) {
|
||||
const u32 *buf = tp.trans->tx_buf;
|
||||
u32 val = buf ? buf[tp.byte/4] : 0x0;
|
||||
|
||||
write_txram_slot_u32(qspi, slot, val);
|
||||
dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
|
||||
} else if (tp.trans->bits_per_word <= 64) {
|
||||
const u64 *buf = tp.trans->tx_buf;
|
||||
u64 val = (buf ? buf[tp.byte/8] : 0x0);
|
||||
|
||||
/* use the length of delay from SPCR1_LSB */
|
||||
if (bcm_qspi_has_fastbr(qspi))
|
||||
mspi_cdram |= MSPI_CDRAM_DT_BIT;
|
||||
|
||||
write_txram_slot_u64(qspi, slot, val);
|
||||
dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
|
||||
}
|
||||
mspi_cdram = MSPI_CDRAM_CONT_BIT;
|
||||
|
||||
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
|
||||
MSPI_CDRAM_BITSE_BIT);
|
||||
|
||||
/* set 3wrire halfduplex mode data from master to slave */
|
||||
if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
|
||||
mspi_cdram |= MSPI_CDRAM_OUTP;
|
||||
|
||||
if (has_bspi(qspi))
|
||||
mspi_cdram &= ~1;
|
||||
@@ -813,9 +957,6 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
|
||||
mspi_cdram |= (~(1 << spi->chip_select) &
|
||||
MSPI_CDRAM_PCS);
|
||||
|
||||
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
|
||||
MSPI_CDRAM_BITSE_BIT);
|
||||
|
||||
write_cdram_slot(qspi, slot, mspi_cdram);
|
||||
|
||||
tstatus = update_qspi_trans_byte_count(qspi, &tp,
|
||||
@@ -1350,7 +1491,8 @@ int bcm_qspi_probe(struct platform_device *pdev,
|
||||
qspi->master = master;
|
||||
|
||||
master->bus_num = -1;
|
||||
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
|
||||
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
|
||||
SPI_3WIRE;
|
||||
master->setup = bcm_qspi_setup;
|
||||
master->transfer_one = bcm_qspi_transfer_one;
|
||||
master->mem_ops = &bcm_qspi_mem_ops;
|
||||
@@ -1460,7 +1602,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
|
||||
&qspi->dev_ids[val]);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "IRQ %s not found\n", name);
|
||||
goto qspi_probe_err;
|
||||
goto qspi_unprepare_err;
|
||||
}
|
||||
|
||||
qspi->dev_ids[val].dev = qspi;
|
||||
@@ -1475,7 +1617,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
|
||||
if (!num_ints) {
|
||||
dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
|
||||
ret = -EINVAL;
|
||||
goto qspi_probe_err;
|
||||
goto qspi_unprepare_err;
|
||||
}
|
||||
|
||||
bcm_qspi_hw_init(qspi);
|
||||
@@ -1499,6 +1641,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
|
||||
|
||||
qspi_reg_err:
|
||||
bcm_qspi_hw_uninit(qspi);
|
||||
qspi_unprepare_err:
|
||||
clk_disable_unprepare(qspi->clk);
|
||||
qspi_probe_err:
|
||||
kfree(qspi->dev_ids);
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/firmware/xlnx-zynqmp.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
@@ -35,6 +36,7 @@
|
||||
/* Quirks */
|
||||
#define CQSPI_NEEDS_WR_DELAY BIT(0)
|
||||
#define CQSPI_DISABLE_DAC_MODE BIT(1)
|
||||
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
|
||||
|
||||
/* Capabilities */
|
||||
#define CQSPI_SUPPORTS_OCTAL BIT(0)
|
||||
@@ -82,11 +84,16 @@ struct cqspi_st {
|
||||
u32 wr_delay;
|
||||
bool use_direct_mode;
|
||||
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
|
||||
bool use_dma_read;
|
||||
u32 pd_dev_id;
|
||||
};
|
||||
|
||||
struct cqspi_driver_platdata {
|
||||
u32 hwcaps_mask;
|
||||
u8 quirks;
|
||||
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
|
||||
u_char *rxbuf, loff_t from_addr, size_t n_rx);
|
||||
u32 (*get_dma_status)(struct cqspi_st *cqspi);
|
||||
};
|
||||
|
||||
/* Operation timeout value */
|
||||
@@ -217,6 +224,8 @@ struct cqspi_driver_platdata {
|
||||
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
|
||||
#define CQSPI_REG_INDIRECTWRBYTES 0x7C
|
||||
|
||||
#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
|
||||
|
||||
#define CQSPI_REG_CMDADDRESS 0x94
|
||||
#define CQSPI_REG_CMDREADDATALOWER 0xA0
|
||||
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
|
||||
@@ -231,6 +240,23 @@ struct cqspi_driver_platdata {
|
||||
#define CQSPI_REG_OP_EXT_WRITE_LSB 16
|
||||
#define CQSPI_REG_OP_EXT_STIG_LSB 0
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
|
||||
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6
|
||||
|
||||
/* Interrupt status bits */
|
||||
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
|
||||
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
|
||||
@@ -250,6 +276,9 @@ struct cqspi_driver_platdata {
|
||||
CQSPI_REG_IRQ_UNDERFLOW)
|
||||
|
||||
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
|
||||
#define CQSPI_DMA_UNALIGN 0x3
|
||||
|
||||
#define CQSPI_REG_VERSAL_DMA_VAL 0x602
|
||||
|
||||
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
|
||||
{
|
||||
@@ -275,10 +304,26 @@ static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
|
||||
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
|
||||
}
|
||||
|
||||
static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
|
||||
{
|
||||
u32 dma_status;
|
||||
|
||||
dma_status = readl(cqspi->iobase +
|
||||
CQSPI_REG_VERSAL_DMA_DST_I_STS);
|
||||
writel(dma_status, cqspi->iobase +
|
||||
CQSPI_REG_VERSAL_DMA_DST_I_STS);
|
||||
|
||||
return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
|
||||
}
|
||||
|
||||
static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
|
||||
{
|
||||
struct cqspi_st *cqspi = dev;
|
||||
unsigned int irq_status;
|
||||
struct device *device = &cqspi->pdev->dev;
|
||||
const struct cqspi_driver_platdata *ddata;
|
||||
|
||||
ddata = of_device_get_match_data(device);
|
||||
|
||||
/* Read interrupt status */
|
||||
irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
|
||||
@@ -286,6 +331,13 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
|
||||
/* Clear interrupt */
|
||||
writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
|
||||
|
||||
if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
|
||||
if (ddata->get_dma_status(cqspi)) {
|
||||
complete(&cqspi->transfer_complete);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
}
|
||||
|
||||
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
|
||||
|
||||
if (irq_status)
|
||||
@@ -781,6 +833,131 @@ failrd:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
|
||||
u_char *rxbuf, loff_t from_addr,
|
||||
size_t n_rx)
|
||||
{
|
||||
struct cqspi_st *cqspi = f_pdata->cqspi;
|
||||
struct device *dev = &cqspi->pdev->dev;
|
||||
void __iomem *reg_base = cqspi->iobase;
|
||||
u32 reg, bytes_to_dma;
|
||||
loff_t addr = from_addr;
|
||||
void *buf = rxbuf;
|
||||
dma_addr_t dma_addr;
|
||||
u8 bytes_rem;
|
||||
int ret = 0;
|
||||
|
||||
bytes_rem = n_rx % 4;
|
||||
bytes_to_dma = (n_rx - bytes_rem);
|
||||
|
||||
if (!bytes_to_dma)
|
||||
goto nondmard;
|
||||
|
||||
ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
reg |= CQSPI_REG_CONFIG_DMA_MASK;
|
||||
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
|
||||
dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev, dma_addr)) {
|
||||
dev_err(dev, "dma mapping failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
|
||||
writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
|
||||
writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
|
||||
reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
|
||||
|
||||
/* Clear all interrupts. */
|
||||
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
|
||||
|
||||
/* Enable DMA done interrupt */
|
||||
writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
|
||||
reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);
|
||||
|
||||
/* Default DMA periph configuration */
|
||||
writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);
|
||||
|
||||
/* Configure DMA Dst address */
|
||||
writel(lower_32_bits(dma_addr),
|
||||
reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
|
||||
writel(upper_32_bits(dma_addr),
|
||||
reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);
|
||||
|
||||
/* Configure DMA Src address */
|
||||
writel(cqspi->trigger_address, reg_base +
|
||||
CQSPI_REG_VERSAL_DMA_SRC_ADDR);
|
||||
|
||||
/* Set DMA destination size */
|
||||
writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);
|
||||
|
||||
/* Set DMA destination control */
|
||||
writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
|
||||
reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);
|
||||
|
||||
writel(CQSPI_REG_INDIRECTRD_START_MASK,
|
||||
reg_base + CQSPI_REG_INDIRECTRD);
|
||||
|
||||
reinit_completion(&cqspi->transfer_complete);
|
||||
|
||||
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
|
||||
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto failrd;
|
||||
}
|
||||
|
||||
/* Disable DMA interrupt */
|
||||
writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
|
||||
|
||||
/* Clear indirect completion status */
|
||||
writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
|
||||
cqspi->iobase + CQSPI_REG_INDIRECTRD);
|
||||
dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
|
||||
|
||||
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
|
||||
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
|
||||
ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
|
||||
PM_OSPI_MUX_SEL_LINEAR);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nondmard:
|
||||
if (bytes_rem) {
|
||||
addr += bytes_to_dma;
|
||||
buf += bytes_to_dma;
|
||||
ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
|
||||
bytes_rem);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
failrd:
|
||||
/* Disable DMA interrupt */
|
||||
writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
|
||||
|
||||
/* Cancel the indirect read */
|
||||
writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
|
||||
reg_base + CQSPI_REG_INDIRECTRD);
|
||||
|
||||
dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
|
||||
|
||||
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
|
||||
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
|
||||
zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
@@ -1180,11 +1357,15 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
struct cqspi_st *cqspi = f_pdata->cqspi;
|
||||
struct device *dev = &cqspi->pdev->dev;
|
||||
const struct cqspi_driver_platdata *ddata;
|
||||
loff_t from = op->addr.val;
|
||||
size_t len = op->data.nbytes;
|
||||
u_char *buf = op->data.buf.in;
|
||||
u64 dma_align = (u64)(uintptr_t)buf;
|
||||
int ret;
|
||||
|
||||
ddata = of_device_get_match_data(dev);
|
||||
ret = cqspi_set_protocol(f_pdata, op);
|
||||
if (ret)
|
||||
return ret;
|
||||
@@ -1196,6 +1377,10 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
|
||||
if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
|
||||
return cqspi_direct_read_execute(f_pdata, buf, from, len);
|
||||
|
||||
if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
|
||||
virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
|
||||
return ddata->indirect_read_dma(f_pdata, buf, from, len);
|
||||
|
||||
return cqspi_indirect_read_execute(f_pdata, buf, from, len);
|
||||
}
|
||||
|
||||
@@ -1299,6 +1484,7 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
|
||||
{
|
||||
struct device *dev = &cqspi->pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
u32 id[2];
|
||||
|
||||
cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
|
||||
|
||||
@@ -1323,6 +1509,10 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
|
||||
|
||||
cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
|
||||
|
||||
if (!of_property_read_u32_array(np, "power-domains", id,
|
||||
ARRAY_SIZE(id)))
|
||||
cqspi->pd_dev_id = id[1];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1359,6 +1549,13 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
|
||||
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
}
|
||||
|
||||
/* Enable DMA interface */
|
||||
if (cqspi->use_dma_read) {
|
||||
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
reg |= CQSPI_REG_CONFIG_DMA_MASK;
|
||||
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
|
||||
}
|
||||
|
||||
cqspi_controller_enable(cqspi, 1);
|
||||
}
|
||||
|
||||
@@ -1548,6 +1745,12 @@ static int cqspi_probe(struct platform_device *pdev)
|
||||
master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
|
||||
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
|
||||
cqspi->use_direct_mode = true;
|
||||
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
|
||||
cqspi->use_dma_read = true;
|
||||
|
||||
if (of_device_is_compatible(pdev->dev.of_node,
|
||||
"xlnx,versal-ospi-1.0"))
|
||||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
|
||||
@@ -1656,6 +1859,13 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
|
||||
.quirks = CQSPI_DISABLE_DAC_MODE,
|
||||
};
|
||||
|
||||
static const struct cqspi_driver_platdata versal_ospi = {
|
||||
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
|
||||
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
|
||||
.indirect_read_dma = cqspi_versal_indirect_read_dma,
|
||||
.get_dma_status = cqspi_get_versal_dma_status,
|
||||
};
|
||||
|
||||
static const struct of_device_id cqspi_dt_ids[] = {
|
||||
{
|
||||
.compatible = "cdns,qspi-nor",
|
||||
@@ -1673,6 +1883,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
|
||||
.compatible = "intel,lgm-qspi",
|
||||
.data = &intel_lgm_qspi,
|
||||
},
|
||||
{
|
||||
.compatible = "xlnx,versal-ospi-1.0",
|
||||
.data = (void *)&versal_ospi,
|
||||
},
|
||||
{ /* end of table */ }
|
||||
};
|
||||
|
||||
|
||||
642
drivers/spi/spi-cadence-xspi.c
Normal file
642
drivers/spi/spi-cadence-xspi.c
Normal file
@@ -0,0 +1,642 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
// Cadence XSPI flash controller driver
|
||||
// Copyright (C) 2020-21 Cadence
|
||||
|
||||
#include <linux/completion.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/log2.h>
|
||||
|
||||
#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
|
||||
#define CDNS_XSPI_MAX_BANKS 8
|
||||
#define CDNS_XSPI_NAME "cadence-xspi"
|
||||
|
||||
/*
|
||||
* Note: below are additional auxiliary registers to
|
||||
* configure XSPI controller pin-strap settings
|
||||
*/
|
||||
|
||||
/* PHY DQ timing register */
|
||||
#define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000
|
||||
|
||||
/* PHY DQS timing register */
|
||||
#define CDNS_XSPI_CCP_PHY_DQS_TIMING		0x0004

/* PHY gate loopback control register */
#define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL	0x0008

/* PHY DLL slave control register */
#define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL	0x0010

/* DLL PHY control register */
#define CDNS_XSPI_DLL_PHY_CTRL			0x1034

/* Command registers */
#define CDNS_XSPI_CMD_REG_0			0x0000
#define CDNS_XSPI_CMD_REG_1			0x0004
#define CDNS_XSPI_CMD_REG_2			0x0008
#define CDNS_XSPI_CMD_REG_3			0x000C
#define CDNS_XSPI_CMD_REG_4			0x0010
#define CDNS_XSPI_CMD_REG_5			0x0014

/* Command status registers */
#define CDNS_XSPI_CMD_STATUS_REG		0x0044

/* Controller status register */
#define CDNS_XSPI_CTRL_STATUS_REG		0x0100
#define CDNS_XSPI_INIT_COMPLETED		BIT(16)
#define CDNS_XSPI_INIT_LEGACY			BIT(9)
#define CDNS_XSPI_INIT_FAIL			BIT(8)
#define CDNS_XSPI_CTRL_BUSY			BIT(7)

/* Controller interrupt status register */
#define CDNS_XSPI_INTR_STATUS_REG		0x0110
#define CDNS_XSPI_STIG_DONE			BIT(23)
#define CDNS_XSPI_SDMA_ERROR			BIT(22)
#define CDNS_XSPI_SDMA_TRIGGER			BIT(21)
#define CDNS_XSPI_CMD_IGNRD_EN			BIT(20)
#define CDNS_XSPI_DDMA_TERR_EN			BIT(18)
#define CDNS_XSPI_CDMA_TREE_EN			BIT(17)
#define CDNS_XSPI_CTRL_IDLE_EN			BIT(16)

#define CDNS_XSPI_TRD_COMP_INTR_STATUS		0x0120
#define CDNS_XSPI_TRD_ERR_INTR_STATUS		0x0130
#define CDNS_XSPI_TRD_ERR_INTR_EN		0x0134

/* Controller interrupt enable register */
#define CDNS_XSPI_INTR_ENABLE_REG		0x0114
#define CDNS_XSPI_INTR_EN			BIT(31)
#define CDNS_XSPI_STIG_DONE_EN			BIT(23)
#define CDNS_XSPI_SDMA_ERROR_EN			BIT(22)
#define CDNS_XSPI_SDMA_TRIGGER_EN		BIT(21)

#define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \
	CDNS_XSPI_STIG_DONE_EN | \
	CDNS_XSPI_SDMA_ERROR_EN | \
	CDNS_XSPI_SDMA_TRIGGER_EN)

/* Controller config register */
#define CDNS_XSPI_CTRL_CONFIG_REG		0x0230
#define CDNS_XSPI_CTRL_WORK_MODE		GENMASK(6, 5)

#define CDNS_XSPI_WORK_MODE_DIRECT		0
#define CDNS_XSPI_WORK_MODE_STIG		1
#define CDNS_XSPI_WORK_MODE_ACMD		3

/* SDMA trigger transaction registers */
#define CDNS_XSPI_SDMA_SIZE_REG			0x0240
#define CDNS_XSPI_SDMA_TRD_INFO_REG		0x0244
#define CDNS_XSPI_SDMA_DIR			BIT(8)

/* Controller features register */
#define CDNS_XSPI_CTRL_FEATURES_REG		0x0F04
#define CDNS_XSPI_NUM_BANKS			GENMASK(25, 24)
#define CDNS_XSPI_DMA_DATA_WIDTH		BIT(21)
#define CDNS_XSPI_NUM_THREADS			GENMASK(3, 0)

/* Controller version register */
#define CDNS_XSPI_CTRL_VERSION_REG		0x0F00
#define CDNS_XSPI_MAGIC_NUM			GENMASK(31, 16)
#define CDNS_XSPI_CTRL_REV			GENMASK(7, 0)

/* STIG Profile 1.0 instruction fields (split into registers) */
#define CDNS_XSPI_CMD_INSTR_TYPE		GENMASK(6, 0)
#define CDNS_XSPI_CMD_P1_R1_ADDR0		GENMASK(31, 24)
#define CDNS_XSPI_CMD_P1_R2_ADDR1		GENMASK(7, 0)
#define CDNS_XSPI_CMD_P1_R2_ADDR2		GENMASK(15, 8)
#define CDNS_XSPI_CMD_P1_R2_ADDR3		GENMASK(23, 16)
#define CDNS_XSPI_CMD_P1_R2_ADDR4		GENMASK(31, 24)
#define CDNS_XSPI_CMD_P1_R3_ADDR5		GENMASK(7, 0)
#define CDNS_XSPI_CMD_P1_R3_CMD			GENMASK(23, 16)
#define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES	GENMASK(30, 28)
#define CDNS_XSPI_CMD_P1_R4_ADDR_IOS		GENMASK(1, 0)
#define CDNS_XSPI_CMD_P1_R4_CMD_IOS		GENMASK(9, 8)
#define CDNS_XSPI_CMD_P1_R4_BANK		GENMASK(14, 12)

/* STIG data sequence instruction fields (split into registers) */
#define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L		GENMASK(31, 16)
#define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H		GENMASK(15, 0)
#define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY	GENMASK(25, 20)
#define CDNS_XSPI_CMD_DSEQ_R4_BANK		GENMASK(14, 12)
#define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS		GENMASK(9, 8)
#define CDNS_XSPI_CMD_DSEQ_R4_DIR		BIT(4)

/* STIG command status fields */
#define CDNS_XSPI_CMD_STATUS_COMPLETED		BIT(15)
#define CDNS_XSPI_CMD_STATUS_FAILED		BIT(14)
#define CDNS_XSPI_CMD_STATUS_DQS_ERROR		BIT(3)
#define CDNS_XSPI_CMD_STATUS_CRC_ERROR		BIT(2)
#define CDNS_XSPI_CMD_STATUS_BUS_ERROR		BIT(1)
#define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR	BIT(0)

#define CDNS_XSPI_STIG_DONE_FLAG		BIT(0)
#define CDNS_XSPI_TRD_STATUS			0x0104

/* Helper macros for filling command registers */
#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
	FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
		CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff))

#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))

#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))

#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \
	FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel))

#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \
	FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)

#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)

#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
		((op)->data.nbytes >> 16) & 0xffff) | \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))

#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \
		ilog2((op)->data.buswidth)) | \
	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \
		((op)->data.dir == SPI_MEM_DATA_IN) ? \
		CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))

enum cdns_xspi_stig_instr_type {
	CDNS_XSPI_STIG_INSTR_TYPE_0,
	CDNS_XSPI_STIG_INSTR_TYPE_1,
	CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127,
};

enum cdns_xspi_sdma_dir {
	CDNS_XSPI_SDMA_DIR_READ,
	CDNS_XSPI_SDMA_DIR_WRITE,
};

enum cdns_xspi_stig_cmd_dir {
	CDNS_XSPI_STIG_CMD_DIR_READ,
	CDNS_XSPI_STIG_CMD_DIR_WRITE,
};

struct cdns_xspi_dev {
	struct platform_device *pdev;
	struct device *dev;

	void __iomem *iobase;
	void __iomem *auxbase;
	void __iomem *sdmabase;

	int irq;
	int cur_cs;
	unsigned int sdmasize;

	struct completion cmd_complete;
	struct completion auto_cmd_complete;
	struct completion sdma_complete;
	bool sdma_error;

	void *in_buffer;
	const void *out_buffer;

	u8 hw_num_banks;
};
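
An illustrative sketch (not part of the driver): how the CDNS_XSPI_CMD_FLD_* helpers above pack a spi_mem_op into the six CMD registers, mirroring what cdns_xspi_send_stig_command() does further down; the function name below is hypothetical.

/* Hypothetical example only: pack "op" for chip select 0. */
static void cdns_xspi_pack_example(const struct spi_mem_op *op, u32 cmd_regs[6])
{
	bool data_phase = op->data.nbytes != 0;

	memset(cmd_regs, 0, 6 * sizeof(u32));
	cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
	cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
	cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
	cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, 0);
	/* cmd_regs[0] stays 0 here; only the data sequence sets CDNS_XSPI_STIG_DONE_FLAG. */
}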

static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
{
	u32 ctrl_stat;

	return readl_relaxed_poll_timeout(cdns_xspi->iobase +
					  CDNS_XSPI_CTRL_STATUS_REG,
					  ctrl_stat,
					  ((ctrl_stat &
					    CDNS_XSPI_CTRL_BUSY) == 0),
					  100, 1000);
}

static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi,
				      u32 cmd_regs[6])
{
	writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5);
	writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4);
	writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3);
	writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2);
	writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1);
	writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0);
}

static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi)
{
	int ret = 0;
	u32 cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);

	if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) {
		if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) {
			if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) {
				dev_err(cdns_xspi->dev,
					"Incorrect DQS pulses detected\n");
				ret = -EPROTO;
			}
			if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) {
				dev_err(cdns_xspi->dev,
					"CRC error received\n");
				ret = -EPROTO;
			}
			if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) {
				dev_err(cdns_xspi->dev,
					"Error resp on system DMA interface\n");
				ret = -EPROTO;
			}
			if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) {
				dev_err(cdns_xspi->dev,
					"Invalid command sequence detected\n");
				ret = -EPROTO;
			}
		}
	} else {
		dev_err(cdns_xspi->dev, "Fatal err - command not completed\n");
		ret = -EPROTO;
	}

	return ret;
}

static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
				     bool enabled)
{
	u32 intr_enable;

	intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
	if (enabled)
		intr_enable |= CDNS_XSPI_INTR_MASK;
	else
		intr_enable &= ~CDNS_XSPI_INTR_MASK;
	writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
}

static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
{
	u32 ctrl_ver;
	u32 ctrl_features;
	u16 hw_magic_num;

	ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG);
	hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver);
	if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) {
		dev_err(cdns_xspi->dev,
			"Incorrect XSPI magic number: %x, expected: %x\n",
			hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE);
		return -EIO;
	}

	ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
	cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
	cdns_xspi_set_interrupts(cdns_xspi, false);

	return 0;
}

static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
{
	u32 sdma_size, sdma_trd_info;
	u8 sdma_dir;

	sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
	sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
	sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);

	switch (sdma_dir) {
	case CDNS_XSPI_SDMA_DIR_READ:
		ioread8_rep(cdns_xspi->sdmabase,
			    cdns_xspi->in_buffer, sdma_size);
		break;

	case CDNS_XSPI_SDMA_DIR_WRITE:
		iowrite8_rep(cdns_xspi->sdmabase,
			     cdns_xspi->out_buffer, sdma_size);
		break;
	}
}
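
Added annotation (not in the original source) summarising the STIG data path implemented by the surrounding functions.

/*
 * STIG data-phase flow, as wired up in cdns_xspi_send_stig_command() below:
 *  1. the data-sequence command is written via cdns_xspi_trigger_command();
 *  2. the controller raises CDNS_XSPI_SDMA_TRIGGER and the IRQ handler
 *     completes sdma_complete;
 *  3. cdns_xspi_sdma_handle() drains or fills the SDMA port with
 *     ioread8_rep()/iowrite8_rep();
 *  4. CDNS_XSPI_STIG_DONE completes cmd_complete and the command status
 *     register is checked with cdns_xspi_check_command_status().
 */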

static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
				       const struct spi_mem_op *op,
				       bool data_phase)
{
	u32 cmd_regs[6];
	u32 cmd_status;
	int ret;

	ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
	if (ret < 0)
		return -EIO;

	writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
	       cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);

	cdns_xspi_set_interrupts(cdns_xspi, true);
	cdns_xspi->sdma_error = false;

	memset(cmd_regs, 0, sizeof(cmd_regs));
	cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
	cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
	cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
	cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
						       cdns_xspi->cur_cs);

	cdns_xspi_trigger_command(cdns_xspi, cmd_regs);

	if (data_phase) {
		cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
		cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
		cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
		cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
							   cdns_xspi->cur_cs);

		cdns_xspi->in_buffer = op->data.buf.in;
		cdns_xspi->out_buffer = op->data.buf.out;

		cdns_xspi_trigger_command(cdns_xspi, cmd_regs);

		wait_for_completion(&cdns_xspi->sdma_complete);
		if (cdns_xspi->sdma_error) {
			cdns_xspi_set_interrupts(cdns_xspi, false);
			return -EIO;
		}
		cdns_xspi_sdma_handle(cdns_xspi);
	}

	wait_for_completion(&cdns_xspi->cmd_complete);
	cdns_xspi_set_interrupts(cdns_xspi, false);

	cmd_status = cdns_xspi_check_command_status(cdns_xspi);
	if (cmd_status)
		return -EPROTO;

	return 0;
}

static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi,
			    struct spi_mem *mem,
			    const struct spi_mem_op *op)
{
	enum spi_mem_data_dir dir = op->data.dir;

	if (cdns_xspi->cur_cs != mem->spi->chip_select)
		cdns_xspi->cur_cs = mem->spi->chip_select;

	return cdns_xspi_send_stig_command(cdns_xspi, op,
					   (dir != SPI_MEM_NO_DATA));
}

static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	struct cdns_xspi_dev *cdns_xspi =
		spi_master_get_devdata(mem->spi->master);
	int ret = 0;

	ret = cdns_xspi_mem_op(cdns_xspi, mem, op);

	return ret;
}

static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct cdns_xspi_dev *cdns_xspi =
		spi_master_get_devdata(mem->spi->master);

	op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);

	return 0;
}

static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
	.exec_op = cdns_xspi_mem_op_execute,
	.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
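
A hedged sketch of how a spi-mem consumer reaches these callbacks; the helper name and opcode below are illustrative, but spi_mem_adjust_op_size() and spi_mem_exec_op() are the standard spi-mem entry points that end up in .adjust_op_size and .exec_op above.

/* Illustrative consumer-side example, not part of this driver. */
static int example_read_id(struct spi_mem *mem, u8 *id, unsigned int len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(len, id, 1));
	int ret;

	/* Lets the controller clamp data.nbytes to its SDMA window. */
	ret = spi_mem_adjust_op_size(mem, &op);
	if (ret)
		return ret;

	/* A real caller would loop if op.data.nbytes was reduced. */
	return spi_mem_exec_op(mem, &op);
}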

static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
{
	struct cdns_xspi_dev *cdns_xspi = dev;
	u32 irq_status;
	irqreturn_t result = IRQ_NONE;

	irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
	writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);

	if (irq_status &
	    (CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER |
	     CDNS_XSPI_STIG_DONE)) {
		if (irq_status & CDNS_XSPI_SDMA_ERROR) {
			dev_err(cdns_xspi->dev,
				"Slave DMA transaction error\n");
			cdns_xspi->sdma_error = true;
			complete(&cdns_xspi->sdma_complete);
		}

		if (irq_status & CDNS_XSPI_SDMA_TRIGGER)
			complete(&cdns_xspi->sdma_complete);

		if (irq_status & CDNS_XSPI_STIG_DONE)
			complete(&cdns_xspi->cmd_complete);

		result = IRQ_HANDLED;
	}

	irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
	if (irq_status) {
		writel(irq_status,
		       cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);

		complete(&cdns_xspi->auto_cmd_complete);

		result = IRQ_HANDLED;
	}

	return result;
}

static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
{
	struct device_node *node_prop = pdev->dev.of_node;
	struct device_node *node_child;
	unsigned int cs;

	for_each_child_of_node(node_prop, node_child) {
		if (!of_device_is_available(node_child))
			continue;

		if (of_property_read_u32(node_child, "reg", &cs)) {
			dev_err(&pdev->dev, "Couldn't get memory chip select\n");
			of_node_put(node_child);
			return -ENXIO;
		} else if (cs >= CDNS_XSPI_MAX_BANKS) {
			dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
			of_node_put(node_child);
			return -ENXIO;
		}
	}

	return 0;
}
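
Added note (not in the original source) on the constraint enforced above.

/*
 * Each enabled flash child node must provide a "reg" value naming its
 * bank/chip select, and that value must be below CDNS_XSPI_MAX_BANKS;
 * otherwise the probe is rejected with -ENXIO.
 */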

static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
{
	struct device *dev = cdns_xspi->dev;

	dev_info(dev, "PHY configuration\n");
	dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n",
		 readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL));
	dev_info(dev, " * phy_dq_timing: %08x\n",
		 readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING));
	dev_info(dev, " * phy_dqs_timing: %08x\n",
		 readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING));
	dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n",
		 readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL));
	dev_info(dev, " * phy_dll_slave_ctrl: %08x\n",
		 readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
}

static int cdns_xspi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master = NULL;
	struct cdns_xspi_dev *cdns_xspi = NULL;
	struct resource *res;
	int ret;

	master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi));
	if (!master)
		return -ENOMEM;

	master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
		SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
		SPI_MODE_0 | SPI_MODE_3;

	master->mem_ops = &cadence_xspi_mem_ops;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = -1;

	platform_set_drvdata(pdev, master);

	cdns_xspi = spi_master_get_devdata(master);
	cdns_xspi->pdev = pdev;
	cdns_xspi->dev = &pdev->dev;
	cdns_xspi->cur_cs = 0;

	init_completion(&cdns_xspi->cmd_complete);
	init_completion(&cdns_xspi->auto_cmd_complete);
	init_completion(&cdns_xspi->sdma_complete);

	ret = cdns_xspi_of_get_plat_data(pdev);
	if (ret)
		return -ENODEV;

	cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io");
	if (IS_ERR(cdns_xspi->iobase)) {
		dev_err(dev, "Failed to remap controller base address\n");
		return PTR_ERR(cdns_xspi->iobase);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
	cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cdns_xspi->sdmabase)) {
		dev_err(dev, "Failed to remap SDMA address\n");
		return PTR_ERR(cdns_xspi->sdmabase);
	}
	cdns_xspi->sdmasize = resource_size(res);

	cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
	if (IS_ERR(cdns_xspi->auxbase)) {
		dev_err(dev, "Failed to remap AUX address\n");
		return PTR_ERR(cdns_xspi->auxbase);
	}

	cdns_xspi->irq = platform_get_irq(pdev, 0);
	if (cdns_xspi->irq < 0) {
		dev_err(dev, "Failed to get IRQ\n");
		return -ENXIO;
	}

	ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
			       IRQF_SHARED, pdev->name, cdns_xspi);
	if (ret) {
		dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq);
		return ret;
	}

	cdns_xspi_print_phy_config(cdns_xspi);

	ret = cdns_xspi_controller_init(cdns_xspi);
	if (ret) {
		dev_err(dev, "Failed to initialize controller\n");
		return ret;
	}

	master->num_chipselect = 1 << cdns_xspi->hw_num_banks;

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		dev_err(dev, "Failed to register SPI master\n");
		return ret;
	}

	dev_info(dev, "Successfully registered SPI master\n");

	return 0;
}
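
Added note (not in the original source) on the chip-select count computed in probe.

/*
 * num_chipselect comes from the NUM_BANKS field read in
 * cdns_xspi_controller_init(): for example, hw_num_banks == 2 gives
 * 1 << 2 = 4 chip selects.
 */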

#ifdef CONFIG_OF
static const struct of_device_id cdns_xspi_of_match[] = {
	{
		.compatible = "cdns,xspi-nor",
	},
	{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
#else
#define cdns_xspi_of_match NULL
#endif /* CONFIG_OF */

static struct platform_driver cdns_xspi_platform_driver = {
	.probe = cdns_xspi_probe,
	.remove = NULL,
	.driver = {
		.name = CDNS_XSPI_NAME,
		.of_match_table = cdns_xspi_of_match,
	},
};

module_platform_driver(cdns_xspi_platform_driver);

MODULE_DESCRIPTION("Cadence XSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CDNS_XSPI_NAME);
MODULE_AUTHOR("Konrad Kociolek <konrad@cadence.com>");
MODULE_AUTHOR("Jayshri Pawar <jpawar@cadence.com>");
MODULE_AUTHOR("Parshuram Thombare <pthombar@cadence.com>");

@@ -67,9 +67,14 @@
|
||||
SPI_FSI_STATUS_RDR_OVERRUN)
|
||||
#define SPI_FSI_PORT_CTRL 0x9
|
||||
|
||||
struct fsi2spi {
|
||||
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
|
||||
struct mutex lock; /* lock access to the device */
|
||||
};
|
||||
|
||||
struct fsi_spi {
|
||||
struct device *dev; /* SPI controller device */
|
||||
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
|
||||
struct fsi2spi *bridge; /* FSI2SPI device */
|
||||
u32 base;
|
||||
};
|
||||
|
||||
@@ -104,7 +109,7 @@ static int fsi_spi_check_status(struct fsi_spi *ctx)
|
||||
u32 sts;
|
||||
__be32 sts_be;
|
||||
|
||||
rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
|
||||
rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be,
|
||||
sizeof(sts_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
@@ -120,73 +125,91 @@ static int fsi_spi_check_status(struct fsi_spi *ctx)
|
||||
|
||||
static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
|
||||
{
|
||||
int rc;
|
||||
int rc = 0;
|
||||
__be32 cmd_be;
|
||||
__be32 data_be;
|
||||
u32 cmd = offset + ctx->base;
|
||||
struct fsi2spi *bridge = ctx->bridge;
|
||||
|
||||
*value = 0ULL;
|
||||
|
||||
if (cmd & FSI2SPI_CMD_WRITE)
|
||||
return -EINVAL;
|
||||
|
||||
cmd_be = cpu_to_be32(cmd);
|
||||
rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
|
||||
rc = mutex_lock_interruptible(&bridge->lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
cmd_be = cpu_to_be32(cmd);
|
||||
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
|
||||
sizeof(cmd_be));
|
||||
if (rc)
|
||||
goto unlock;
|
||||
|
||||
rc = fsi_spi_check_status(ctx);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
|
||||
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be,
|
||||
sizeof(data_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
*value |= (u64)be32_to_cpu(data_be) << 32;
|
||||
|
||||
rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
|
||||
rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be,
|
||||
sizeof(data_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
*value |= (u64)be32_to_cpu(data_be);
|
||||
dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
|
||||
|
||||
return 0;
|
||||
unlock:
|
||||
mutex_unlock(&bridge->lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
|
||||
{
|
||||
int rc;
|
||||
int rc = 0;
|
||||
__be32 cmd_be;
|
||||
__be32 data_be;
|
||||
u32 cmd = offset + ctx->base;
|
||||
struct fsi2spi *bridge = ctx->bridge;
|
||||
|
||||
if (cmd & FSI2SPI_CMD_WRITE)
|
||||
return -EINVAL;
|
||||
|
||||
rc = mutex_lock_interruptible(&bridge->lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
|
||||
|
||||
data_be = cpu_to_be32(upper_32_bits(value));
|
||||
rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
|
||||
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be,
|
||||
sizeof(data_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
data_be = cpu_to_be32(lower_32_bits(value));
|
||||
rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
|
||||
rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be,
|
||||
sizeof(data_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
|
||||
rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
|
||||
rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
|
||||
sizeof(cmd_be));
|
||||
if (rc)
|
||||
return rc;
|
||||
goto unlock;
|
||||
|
||||
return fsi_spi_check_status(ctx);
|
||||
rc = fsi_spi_check_status(ctx);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&bridge->lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int fsi_spi_data_in(u64 in, u8 *rx, int len)
|
||||
@@ -234,6 +257,26 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
|
||||
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
|
||||
}
|
||||
|
||||
static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir)
|
||||
{
|
||||
int rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, status);
|
||||
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (*status & SPI_FSI_STATUS_ANY_ERROR) {
|
||||
dev_err(ctx->dev, "%s error: %016llx\n", dir, *status);
|
||||
|
||||
rc = fsi_spi_reset(ctx);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return -EREMOTEIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
||||
{
|
||||
/*
|
||||
@@ -273,18 +316,9 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
|
||||
return rc;
|
||||
|
||||
do {
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
|
||||
&status);
|
||||
rc = fsi_spi_status(ctx, &status, "TX");
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (status & SPI_FSI_STATUS_ANY_ERROR) {
|
||||
rc = fsi_spi_reset(ctx);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return -EREMOTEIO;
|
||||
}
|
||||
} while (status & SPI_FSI_STATUS_TDR_FULL);
|
||||
|
||||
sent += nb;
|
||||
@@ -296,18 +330,9 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
|
||||
|
||||
while (transfer->len > recv) {
|
||||
do {
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
|
||||
&status);
|
||||
rc = fsi_spi_status(ctx, &status, "RX");
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (status & SPI_FSI_STATUS_ANY_ERROR) {
|
||||
rc = fsi_spi_reset(ctx);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return -EREMOTEIO;
|
||||
}
|
||||
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
|
||||
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
|
||||
@@ -348,8 +373,12 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
|
||||
if (status & (SPI_FSI_STATUS_ANY_ERROR |
|
||||
SPI_FSI_STATUS_TDR_FULL |
|
||||
SPI_FSI_STATUS_RDR_FULL)) {
|
||||
if (reset)
|
||||
if (reset) {
|
||||
dev_err(ctx->dev,
|
||||
"Initialization error: %08llx\n",
|
||||
status);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
rc = fsi_spi_reset(ctx);
|
||||
if (rc)
|
||||
@@ -388,7 +417,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
struct spi_transfer *transfer;
|
||||
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
|
||||
|
||||
rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
|
||||
rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev);
|
||||
if (rc)
|
||||
goto error;
|
||||
|
||||
@@ -478,12 +507,20 @@ static int fsi_spi_probe(struct device *dev)
|
||||
int rc;
|
||||
struct device_node *np;
|
||||
int num_controllers_registered = 0;
|
||||
struct fsi2spi *bridge;
|
||||
struct fsi_device *fsi = to_fsi_dev(dev);
|
||||
|
||||
rc = fsi_spi_check_mux(fsi, dev);
|
||||
if (rc)
|
||||
return -ENODEV;
|
||||
|
||||
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
|
||||
if (!bridge)
|
||||
return -ENOMEM;
|
||||
|
||||
bridge->fsi = fsi;
|
||||
mutex_init(&bridge->lock);
|
||||
|
||||
for_each_available_child_of_node(dev->of_node, np) {
|
||||
u32 base;
|
||||
struct fsi_spi *ctx;
|
||||
@@ -506,7 +543,7 @@ static int fsi_spi_probe(struct device *dev)
|
||||
|
||||
ctx = spi_controller_get_devdata(ctlr);
|
||||
ctx->dev = &ctlr->dev;
|
||||
ctx->fsi = fsi;
|
||||
ctx->bridge = bridge;
|
||||
ctx->base = base + SPI_FSI_BASE;
|
||||
|
||||
rc = devm_spi_register_controller(dev, ctlr);
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma/qcom-gpi-dma.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/log2.h>
|
||||
@@ -63,6 +66,15 @@
|
||||
#define TIMESTAMP_AFTER BIT(3)
|
||||
#define POST_CMD_DELAY BIT(4)
|
||||
|
||||
#define GSI_LOOPBACK_EN BIT(0)
|
||||
#define GSI_CS_TOGGLE BIT(3)
|
||||
#define GSI_CPHA BIT(4)
|
||||
#define GSI_CPOL BIT(5)
|
||||
|
||||
#define MAX_TX_SG 3
|
||||
#define NUM_SPI_XFER 8
|
||||
#define SPI_XFER_TIMEOUT_MS 250
|
||||
|
||||
struct spi_geni_master {
|
||||
struct geni_se se;
|
||||
struct device *dev;
|
||||
@@ -84,6 +96,9 @@ struct spi_geni_master {
|
||||
int irq;
|
||||
bool cs_flag;
|
||||
bool abort_failed;
|
||||
struct dma_chan *tx;
|
||||
struct dma_chan *rx;
|
||||
int cur_xfer_mode;
|
||||
};
|
||||
|
||||
static int get_spi_clk_cfg(unsigned int speed_hz,
|
||||
@@ -330,34 +345,197 @@ static int setup_fifo_params(struct spi_device *spi_slv,
|
||||
return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
|
||||
}
|
||||
|
||||
static void
|
||||
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
|
||||
{
|
||||
struct spi_master *spi = cb;
|
||||
|
||||
if (result->result != DMA_TRANS_NOERROR) {
|
||||
dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!result->residue) {
|
||||
dev_dbg(&spi->dev, "DMA txn completed\n");
|
||||
spi_finalize_current_transfer(spi);
|
||||
} else {
|
||||
dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
|
||||
}
|
||||
}
|
||||
|
||||
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
|
||||
struct spi_device *spi_slv, struct spi_master *spi)
|
||||
{
|
||||
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
|
||||
struct dma_slave_config config = {};
|
||||
struct gpi_spi_config peripheral = {};
|
||||
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
|
||||
int ret;
|
||||
|
||||
config.peripheral_config = &peripheral;
|
||||
config.peripheral_size = sizeof(peripheral);
|
||||
peripheral.set_config = true;
|
||||
|
||||
if (xfer->bits_per_word != mas->cur_bits_per_word ||
|
||||
xfer->speed_hz != mas->cur_speed_hz) {
|
||||
mas->cur_bits_per_word = xfer->bits_per_word;
|
||||
mas->cur_speed_hz = xfer->speed_hz;
|
||||
}
|
||||
|
||||
if (xfer->tx_buf && xfer->rx_buf) {
|
||||
peripheral.cmd = SPI_DUPLEX;
|
||||
} else if (xfer->tx_buf) {
|
||||
peripheral.cmd = SPI_TX;
|
||||
peripheral.rx_len = 0;
|
||||
} else if (xfer->rx_buf) {
|
||||
peripheral.cmd = SPI_RX;
|
||||
if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
|
||||
peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
|
||||
} else {
|
||||
int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
|
||||
|
||||
peripheral.rx_len = (xfer->len / bytes_per_word);
|
||||
}
|
||||
}
|
||||
|
||||
peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
|
||||
peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
|
||||
peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
|
||||
peripheral.cs = spi_slv->chip_select;
|
||||
peripheral.pack_en = true;
|
||||
peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
|
||||
|
||||
ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
|
||||
&peripheral.clk_src, &peripheral.clk_div);
|
||||
if (ret) {
|
||||
dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!xfer->cs_change) {
|
||||
if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
|
||||
peripheral.fragmentation = FRAGMENTATION;
|
||||
}
|
||||
|
||||
if (peripheral.cmd & SPI_RX) {
|
||||
dmaengine_slave_config(mas->rx, &config);
|
||||
rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
|
||||
DMA_DEV_TO_MEM, flags);
|
||||
if (!rx_desc) {
|
||||
dev_err(mas->dev, "Err setting up rx desc\n");
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Prepare the TX always, even for RX or tx_buf being null, we would
|
||||
* need TX to be prepared per GSI spec
|
||||
*/
|
||||
dmaengine_slave_config(mas->tx, &config);
|
||||
tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
|
||||
DMA_MEM_TO_DEV, flags);
|
||||
if (!tx_desc) {
|
||||
dev_err(mas->dev, "Err setting up tx desc\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
tx_desc->callback_result = spi_gsi_callback_result;
|
||||
tx_desc->callback_param = spi;
|
||||
|
||||
if (peripheral.cmd & SPI_RX)
|
||||
dmaengine_submit(rx_desc);
|
||||
dmaengine_submit(tx_desc);
|
||||
|
||||
if (peripheral.cmd & SPI_RX)
|
||||
dma_async_issue_pending(mas->rx);
|
||||
|
||||
dma_async_issue_pending(mas->tx);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static bool geni_can_dma(struct spi_controller *ctlr,
|
||||
struct spi_device *slv, struct spi_transfer *xfer)
|
||||
{
|
||||
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
|
||||
|
||||
/* check if dma is supported */
|
||||
return mas->cur_xfer_mode != GENI_SE_FIFO;
|
||||
}
|
||||
|
||||
static int spi_geni_prepare_message(struct spi_master *spi,
|
||||
struct spi_message *spi_msg)
|
||||
{
|
||||
int ret;
|
||||
struct spi_geni_master *mas = spi_master_get_devdata(spi);
|
||||
int ret;
|
||||
|
||||
if (spi_geni_is_abort_still_pending(mas))
|
||||
return -EBUSY;
|
||||
switch (mas->cur_xfer_mode) {
|
||||
case GENI_SE_FIFO:
|
||||
if (spi_geni_is_abort_still_pending(mas))
|
||||
return -EBUSY;
|
||||
ret = setup_fifo_params(spi_msg->spi, spi);
|
||||
if (ret)
|
||||
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
|
||||
return ret;
|
||||
|
||||
ret = setup_fifo_params(spi_msg->spi, spi);
|
||||
if (ret)
|
||||
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
|
||||
case GENI_GPI_DMA:
|
||||
/* nothing to do for GPI DMA */
|
||||
return 0;
|
||||
}
|
||||
|
||||
dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mas->tx = dma_request_chan(mas->dev, "tx");
|
||||
ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n");
|
||||
if (ret < 0)
|
||||
goto err_tx;
|
||||
|
||||
mas->rx = dma_request_chan(mas->dev, "rx");
|
||||
ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n");
|
||||
if (ret < 0)
|
||||
goto err_rx;
|
||||
|
||||
return 0;
|
||||
|
||||
err_rx:
|
||||
dma_release_channel(mas->tx);
|
||||
mas->tx = NULL;
|
||||
err_tx:
|
||||
mas->rx = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
|
||||
{
|
||||
if (mas->rx) {
|
||||
dma_release_channel(mas->rx);
|
||||
mas->rx = NULL;
|
||||
}
|
||||
|
||||
if (mas->tx) {
|
||||
dma_release_channel(mas->tx);
|
||||
mas->tx = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int spi_geni_init(struct spi_geni_master *mas)
|
||||
{
|
||||
struct geni_se *se = &mas->se;
|
||||
unsigned int proto, major, minor, ver;
|
||||
u32 spi_tx_cfg;
|
||||
u32 spi_tx_cfg, fifo_disable;
|
||||
int ret = -ENXIO;
|
||||
|
||||
pm_runtime_get_sync(mas->dev);
|
||||
|
||||
proto = geni_se_read_proto(se);
|
||||
if (proto != GENI_SE_SPI) {
|
||||
dev_err(mas->dev, "Invalid proto %d\n", proto);
|
||||
pm_runtime_put(mas->dev);
|
||||
return -ENXIO;
|
||||
goto out_pm;
|
||||
}
|
||||
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
|
||||
|
||||
@@ -380,15 +558,38 @@ static int spi_geni_init(struct spi_geni_master *mas)
|
||||
else
|
||||
mas->oversampling = 1;
|
||||
|
||||
geni_se_select_mode(se, GENI_SE_FIFO);
|
||||
fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
|
||||
switch (fifo_disable) {
|
||||
case 1:
|
||||
ret = spi_geni_grab_gpi_chan(mas);
|
||||
if (!ret) { /* success case */
|
||||
mas->cur_xfer_mode = GENI_GPI_DMA;
|
||||
geni_se_select_mode(se, GENI_GPI_DMA);
|
||||
dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* in case of failure to get dma channel, we can still do the
|
||||
* FIFO mode, so fallthrough
|
||||
*/
|
||||
dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
|
||||
fallthrough;
|
||||
|
||||
case 0:
|
||||
mas->cur_xfer_mode = GENI_SE_FIFO;
|
||||
geni_se_select_mode(se, GENI_SE_FIFO);
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* We always control CS manually */
|
||||
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
|
||||
spi_tx_cfg &= ~CS_TOGGLE;
|
||||
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
|
||||
|
||||
out_pm:
|
||||
pm_runtime_put(mas->dev);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
|
||||
@@ -569,8 +770,11 @@ static int spi_geni_transfer_one(struct spi_master *spi,
|
||||
if (!xfer->len)
|
||||
return 0;
|
||||
|
||||
setup_fifo_xfer(xfer, mas, slv->mode, spi);
|
||||
return 1;
|
||||
if (mas->cur_xfer_mode == GENI_SE_FIFO) {
|
||||
setup_fifo_xfer(xfer, mas, slv->mode, spi);
|
||||
return 1;
|
||||
}
|
||||
return setup_gsi_xfer(xfer, mas, slv, spi);
|
||||
}
|
||||
|
||||
static irqreturn_t geni_spi_isr(int irq, void *data)
|
||||
@@ -665,6 +869,13 @@ static int spi_geni_probe(struct platform_device *pdev)
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
|
||||
if (ret) {
|
||||
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "could not set DMA mask\n");
|
||||
}
|
||||
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
@@ -704,9 +915,10 @@ static int spi_geni_probe(struct platform_device *pdev)
|
||||
spi->max_speed_hz = 50000000;
|
||||
spi->prepare_message = spi_geni_prepare_message;
|
||||
spi->transfer_one = spi_geni_transfer_one;
|
||||
spi->can_dma = geni_can_dma;
|
||||
spi->dma_map_dev = dev->parent;
|
||||
spi->auto_runtime_pm = true;
|
||||
spi->handle_err = handle_fifo_timeout;
|
||||
spi->set_cs = spi_geni_set_cs;
|
||||
spi->use_gpio_descriptors = true;
|
||||
|
||||
init_completion(&mas->cs_done);
|
||||
@@ -732,9 +944,17 @@ static int spi_geni_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto spi_geni_probe_runtime_disable;
|
||||
|
||||
/*
|
||||
* check the mode supported and set_cs for fifo mode only
|
||||
* for dma (gsi) mode, the gsi will set cs based on params passed in
|
||||
* TRE
|
||||
*/
|
||||
if (mas->cur_xfer_mode == GENI_SE_FIFO)
|
||||
spi->set_cs = spi_geni_set_cs;
|
||||
|
||||
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
|
||||
if (ret)
|
||||
goto spi_geni_probe_runtime_disable;
|
||||
goto spi_geni_release_dma;
|
||||
|
||||
ret = spi_register_master(spi);
|
||||
if (ret)
|
||||
@@ -743,6 +963,8 @@ static int spi_geni_probe(struct platform_device *pdev)
|
||||
return 0;
|
||||
spi_geni_probe_free_irq:
|
||||
free_irq(mas->irq, spi);
|
||||
spi_geni_release_dma:
|
||||
spi_geni_release_dma_chan(mas);
|
||||
spi_geni_probe_runtime_disable:
|
||||
pm_runtime_disable(dev);
|
||||
return ret;
|
||||
@@ -756,6 +978,8 @@ static int spi_geni_remove(struct platform_device *pdev)
|
||||
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
|
||||
spi_unregister_master(spi);
|
||||
|
||||
spi_geni_release_dma_chan(mas);
|
||||
|
||||
free_irq(mas->irq, spi);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return 0;
|
||||
|
||||
482
drivers/spi/spi-ingenic.c
Normal file
@@ -0,0 +1,482 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* SPI bus driver for the Ingenic JZ47xx SoCs
|
||||
* Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
|
||||
* Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/spi/spi.h>
|
||||
|
||||
#define REG_SSIDR 0x0
|
||||
#define REG_SSICR0 0x4
|
||||
#define REG_SSICR1 0x8
|
||||
#define REG_SSISR 0xc
|
||||
#define REG_SSIGR 0x18
|
||||
|
||||
#define REG_SSICR0_TENDIAN_LSB BIT(19)
|
||||
#define REG_SSICR0_RENDIAN_LSB BIT(17)
|
||||
#define REG_SSICR0_SSIE BIT(15)
|
||||
#define REG_SSICR0_LOOP BIT(10)
|
||||
#define REG_SSICR0_EACLRUN BIT(7)
|
||||
#define REG_SSICR0_FSEL BIT(6)
|
||||
#define REG_SSICR0_TFLUSH BIT(2)
|
||||
#define REG_SSICR0_RFLUSH BIT(1)
|
||||
|
||||
#define REG_SSICR1_FRMHL_MASK (BIT(31) | BIT(30))
|
||||
#define REG_SSICR1_FRMHL BIT(30)
|
||||
#define REG_SSICR1_LFST BIT(25)
|
||||
#define REG_SSICR1_UNFIN BIT(23)
|
||||
#define REG_SSICR1_PHA BIT(1)
|
||||
#define REG_SSICR1_POL BIT(0)
|
||||
|
||||
#define REG_SSISR_END BIT(7)
|
||||
#define REG_SSISR_BUSY BIT(6)
|
||||
#define REG_SSISR_TFF BIT(5)
|
||||
#define REG_SSISR_RFE BIT(4)
|
||||
#define REG_SSISR_RFHF BIT(2)
|
||||
#define REG_SSISR_UNDR BIT(1)
|
||||
#define REG_SSISR_OVER BIT(0)
|
||||
|
||||
#define SPI_INGENIC_FIFO_SIZE 128u
|
||||
|
||||
struct jz_soc_info {
|
||||
u32 bits_per_word_mask;
|
||||
struct reg_field flen_field;
|
||||
bool has_trendian;
|
||||
};
|
||||
|
||||
struct ingenic_spi {
|
||||
const struct jz_soc_info *soc_info;
|
||||
struct clk *clk;
|
||||
struct resource *mem_res;
|
||||
|
||||
struct regmap *map;
|
||||
struct regmap_field *flen_field;
|
||||
};
|
||||
|
||||
static int spi_ingenic_wait(struct ingenic_spi *priv,
|
||||
unsigned long mask,
|
||||
bool condition)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
|
||||
!!(val & mask) == condition,
|
||||
100, 10000);
|
||||
}
|
||||
|
||||
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);
|
||||
|
||||
if (disable) {
|
||||
regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
|
||||
regmap_clear_bits(priv->map, REG_SSISR,
|
||||
REG_SSISR_UNDR | REG_SSISR_OVER);
|
||||
|
||||
spi_ingenic_wait(priv, REG_SSISR_END, true);
|
||||
} else {
|
||||
regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
|
||||
}
|
||||
|
||||
regmap_set_bits(priv->map, REG_SSICR0,
|
||||
REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
|
||||
}
|
||||
|
||||
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
|
||||
struct spi_device *spi,
|
||||
struct spi_transfer *xfer)
|
||||
{
|
||||
unsigned long clk_hz = clk_get_rate(priv->clk);
|
||||
u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
|
||||
bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;
|
||||
|
||||
cdiv = clk_hz / (speed_hz * 2);
|
||||
cdiv = clamp(cdiv, 1u, 0x100u) - 1;
|
||||
|
||||
regmap_write(priv->map, REG_SSIGR, cdiv);
|
||||
|
||||
regmap_field_write(priv->flen_field, bits_per_word - 2);
|
||||
}
|
||||
|
||||
static void spi_ingenic_finalize_transfer(void *controller)
|
||||
{
|
||||
spi_finalize_current_transfer(controller);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
|
||||
struct sg_table *sg, enum dma_transfer_direction dir,
|
||||
unsigned int bits)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
|
||||
struct dma_slave_config cfg = {
|
||||
.direction = dir,
|
||||
.src_addr = priv->mem_res->start + REG_SSIDR,
|
||||
.dst_addr = priv->mem_res->start + REG_SSIDR,
|
||||
};
|
||||
struct dma_async_tx_descriptor *desc;
|
||||
dma_cookie_t cookie;
|
||||
int ret;
|
||||
|
||||
if (bits > 16) {
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
cfg.src_maxburst = cfg.dst_maxburst = 4;
|
||||
} else if (bits > 8) {
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
||||
cfg.src_maxburst = cfg.dst_maxburst = 2;
|
||||
} else {
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
||||
cfg.src_maxburst = cfg.dst_maxburst = 1;
|
||||
}
|
||||
|
||||
ret = dmaengine_slave_config(chan, &cfg);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
|
||||
DMA_PREP_INTERRUPT);
|
||||
if (!desc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (dir == DMA_DEV_TO_MEM) {
|
||||
desc->callback = spi_ingenic_finalize_transfer;
|
||||
desc->callback_param = ctlr;
|
||||
}
|
||||
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
||||
ret = dma_submit_error(cookie);
|
||||
if (ret) {
|
||||
dmaengine_desc_free(desc);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
||||
static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
|
||||
struct spi_transfer *xfer, unsigned int bits)
|
||||
{
|
||||
struct dma_async_tx_descriptor *rx_desc, *tx_desc;
|
||||
|
||||
rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
|
||||
&xfer->rx_sg, DMA_DEV_TO_MEM, bits);
|
||||
if (IS_ERR(rx_desc))
|
||||
return PTR_ERR(rx_desc);
|
||||
|
||||
tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
|
||||
&xfer->tx_sg, DMA_MEM_TO_DEV, bits);
|
||||
if (IS_ERR(tx_desc)) {
|
||||
dmaengine_terminate_async(ctlr->dma_rx);
|
||||
dmaengine_desc_free(rx_desc);
|
||||
return PTR_ERR(tx_desc);
|
||||
}
|
||||
|
||||
dma_async_issue_pending(ctlr->dma_rx);
|
||||
dma_async_issue_pending(ctlr->dma_tx);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#define SPI_INGENIC_TX(x) \
|
||||
static int spi_ingenic_tx##x(struct ingenic_spi *priv, \
|
||||
struct spi_transfer *xfer) \
|
||||
{ \
|
||||
unsigned int count = xfer->len / (x / 8); \
|
||||
unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE); \
|
||||
const u##x *tx_buf = xfer->tx_buf; \
|
||||
u##x *rx_buf = xfer->rx_buf; \
|
||||
unsigned int i, val; \
|
||||
int err; \
|
||||
\
|
||||
/* Fill up the TX fifo */ \
|
||||
for (i = 0; i < prefill; i++) { \
|
||||
val = tx_buf ? tx_buf[i] : 0; \
|
||||
\
|
||||
regmap_write(priv->map, REG_SSIDR, val); \
|
||||
} \
|
||||
\
|
||||
for (i = 0; i < count; i++) { \
|
||||
err = spi_ingenic_wait(priv, REG_SSISR_RFE, false); \
|
||||
if (err) \
|
||||
return err; \
|
||||
\
|
||||
regmap_read(priv->map, REG_SSIDR, &val); \
|
||||
if (rx_buf) \
|
||||
rx_buf[i] = val; \
|
||||
\
|
||||
if (i < count - prefill) { \
|
||||
val = tx_buf ? tx_buf[i + prefill] : 0; \
|
||||
\
|
||||
regmap_write(priv->map, REG_SSIDR, val); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return 0; \
|
||||
}
|
||||
SPI_INGENIC_TX(8)
|
||||
SPI_INGENIC_TX(16)
|
||||
SPI_INGENIC_TX(32)
|
||||
#undef SPI_INGENIC_TX
|
||||
|
||||
static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
|
||||
struct spi_device *spi,
|
||||
struct spi_transfer *xfer)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
|
||||
unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
|
||||
bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
|
||||
|
||||
spi_ingenic_prepare_transfer(priv, spi, xfer);
|
||||
|
||||
if (ctlr->cur_msg_mapped && can_dma)
|
||||
return spi_ingenic_dma_tx(ctlr, xfer, bits);
|
||||
|
||||
if (bits > 16)
|
||||
return spi_ingenic_tx32(priv, xfer);
|
||||
|
||||
if (bits > 8)
|
||||
return spi_ingenic_tx16(priv, xfer);
|
||||
|
||||
return spi_ingenic_tx8(priv, xfer);
|
||||
}
|
||||
|
||||
static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
|
||||
struct spi_message *message)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
|
||||
struct spi_device *spi = message->spi;
|
||||
unsigned int cs = REG_SSICR1_FRMHL << spi->chip_select;
|
||||
unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
|
||||
unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
|
||||
unsigned int ssicr0 = 0, ssicr1 = 0;
|
||||
|
||||
if (priv->soc_info->has_trendian) {
|
||||
ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
|
||||
|
||||
if (spi->mode & SPI_LSB_FIRST)
|
||||
ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
|
||||
} else {
|
||||
ssicr1_mask |= REG_SSICR1_LFST;
|
||||
|
||||
if (spi->mode & SPI_LSB_FIRST)
|
||||
ssicr1 |= REG_SSICR1_LFST;
|
||||
}
|
||||
|
||||
if (spi->mode & SPI_LOOP)
|
||||
ssicr0 |= REG_SSICR0_LOOP;
|
||||
if (spi->chip_select)
|
||||
ssicr0 |= REG_SSICR0_FSEL;
|
||||
|
||||
if (spi->mode & SPI_CPHA)
|
||||
ssicr1 |= REG_SSICR1_PHA;
|
||||
if (spi->mode & SPI_CPOL)
|
||||
ssicr1 |= REG_SSICR1_POL;
|
||||
if (spi->mode & SPI_CS_HIGH)
|
||||
ssicr1 |= cs;
|
||||
|
||||
regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
|
||||
regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
|
||||
int ret;
|
||||
|
||||
ret = clk_prepare_enable(priv->clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
|
||||
regmap_write(priv->map, REG_SSICR1, 0);
|
||||
regmap_write(priv->map, REG_SSISR, 0);
|
||||
regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
|
||||
{
|
||||
struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
|
||||
|
||||
regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
|
||||
|
||||
clk_disable_unprepare(priv->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
|
||||
struct spi_device *spi,
|
||||
struct spi_transfer *xfer)
|
||||
{
|
||||
struct dma_slave_caps caps;
|
||||
int ret;
|
||||
|
||||
ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
|
||||
if (ret) {
|
||||
dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
|
||||
return false;
|
||||
}
|
||||
|
||||
return !caps.max_sg_burst ||
|
||||
xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
|
||||
}
|
||||
|
||||
static int spi_ingenic_request_dma(struct spi_controller *ctlr,
|
||||
struct device *dev)
|
||||
{
|
||||
ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
|
||||
if (!ctlr->dma_tx)
|
||||
return -ENODEV;
|
||||
|
||||
ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
|
||||
|
||||
if (!ctlr->dma_rx)
|
||||
return -ENODEV;
|
||||
|
||||
ctlr->can_dma = spi_ingenic_can_dma;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void spi_ingenic_release_dma(void *data)
|
||||
{
|
||||
struct spi_controller *ctlr = data;
|
||||
|
||||
if (ctlr->dma_tx)
|
||||
dma_release_channel(ctlr->dma_tx);
|
||||
if (ctlr->dma_rx)
|
||||
dma_release_channel(ctlr->dma_rx);
|
||||
}
|
||||
|
||||
static const struct regmap_config spi_ingenic_regmap_config = {
|
||||
.reg_bits = 32,
|
||||
.val_bits = 32,
|
||||
.reg_stride = 4,
|
||||
.max_register = REG_SSIGR,
|
||||
};
|
||||
|
||||
static int spi_ingenic_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct jz_soc_info *pdata;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct spi_controller *ctlr;
|
||||
struct ingenic_spi *priv;
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
|
||||
pdata = of_device_get_match_data(dev);
|
||||
if (!pdata) {
|
||||
dev_err(dev, "Missing platform data.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
|
||||
if (!ctlr) {
|
||||
dev_err(dev, "Unable to allocate SPI controller.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
priv = spi_controller_get_devdata(ctlr);
|
||||
priv->soc_info = pdata;
|
||||
|
||||
priv->clk = devm_clk_get(dev, NULL);
|
||||
if (IS_ERR(priv->clk)) {
|
||||
return dev_err_probe(dev, PTR_ERR(priv->clk),
|
||||
"Unable to get clock.\n");
|
||||
}
|
||||
|
||||
base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
|
||||
priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
|
||||
if (IS_ERR(priv->map))
|
||||
return PTR_ERR(priv->map);
|
||||
|
||||
priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
|
||||
pdata->flen_field);
|
||||
if (IS_ERR(priv->flen_field))
|
||||
return PTR_ERR(priv->flen_field);
|
||||
|
||||
platform_set_drvdata(pdev, ctlr);
|
||||
|
||||
ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
|
||||
ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
|
||||
ctlr->prepare_message = spi_ingenic_prepare_message;
|
||||
ctlr->set_cs = spi_ingenic_set_cs;
|
||||
ctlr->transfer_one = spi_ingenic_transfer_one;
|
||||
ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
|
||||
ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
|
||||
ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
|
||||
ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
|
||||
ctlr->min_speed_hz = 7200;
|
||||
ctlr->max_speed_hz = 54000000;
|
||||
ctlr->num_chipselect = 2;
|
||||
ctlr->dev.of_node = pdev->dev.of_node;
|
||||
|
||||
if (spi_ingenic_request_dma(ctlr, dev))
|
||||
dev_warn(dev, "DMA not available.\n");
|
||||
|
||||
ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
|
||||
if (ret) {
|
||||
dev_err(dev, "Unable to add action.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = devm_spi_register_controller(dev, ctlr);
|
||||
if (ret)
|
||||
dev_err(dev, "Unable to register SPI controller.\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct jz_soc_info jz4750_soc_info = {
|
||||
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
|
||||
.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
|
||||
.has_trendian = false,
|
||||
};
|
||||
|
||||
static const struct jz_soc_info jz4780_soc_info = {
|
||||
.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
|
||||
.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
|
||||
.has_trendian = true,
|
||||
};
|
||||
|
||||
static const struct of_device_id spi_ingenic_of_match[] = {
|
||||
{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
|
||||
{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);
|
||||
|
||||
static struct platform_driver spi_ingenic_driver = {
|
||||
.driver = {
|
||||
.name = "spi-ingenic",
|
||||
.of_match_table = spi_ingenic_of_match,
|
||||
},
|
||||
.probe = spi_ingenic_probe,
|
||||
};
|
||||
|
||||
module_platform_driver(spi_ingenic_driver);
|
||||
MODULE_DESCRIPTION("SPI bus driver for the Ingenic JZ47xx SoCs");
|
||||
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
|
||||
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -160,7 +160,7 @@ static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

-	if (op->dummy.buswidth)
+	if (op->dummy.nbytes)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {

@@ -769,6 +769,7 @@ static int orion_spi_probe(struct platform_device *pdev)
	dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dir_acc->vaddr) {
		status = -ENOMEM;
+		of_node_put(np);
		goto out_rel_axi_clk;
	}
	dir_acc->size = PAGE_SIZE;

@@ -139,7 +139,9 @@ static int rpcif_spi_probe(struct platform_device *pdev)
		return -ENOMEM;

	rpc = spi_controller_get_devdata(ctlr);
-	rpcif_sw_init(rpc, parent);
+	error = rpcif_sw_init(rpc, parent);
+	if (error)
+		return error;

	platform_set_drvdata(pdev, ctlr);

@@ -1427,4 +1427,3 @@ module_platform_driver(rspi_driver);
|
||||
MODULE_DESCRIPTION("Renesas RSPI bus driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Yoshihiro Shimoda");
|
||||
MODULE_ALIAS("platform:rspi");
|
||||
|
||||
@@ -1426,4 +1426,3 @@ module_platform_driver(sh_msiof_spi_drv);
|
||||
MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
|
||||
MODULE_AUTHOR("Magnus Damm");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS("platform:spi_sh_msiof");
|
||||
|
||||
@@ -397,7 +397,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
|
||||
ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
|
||||
}
|
||||
|
||||
if (op->dummy.buswidth && op->dummy.nbytes)
|
||||
if (op->dummy.nbytes)
|
||||
ccr |= FIELD_PREP(CCR_DCYC_MASK,
|
||||
op->dummy.nbytes * 8 / op->dummy.buswidth);
|
||||
|
||||
|
||||
@@ -1124,7 +1124,7 @@ exit_free_irq:
|
||||
exit_pm_put:
|
||||
pm_runtime_put(&pdev->dev);
|
||||
exit_pm_disable:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
pm_runtime_force_suspend(&pdev->dev);
|
||||
|
||||
tegra_slink_deinit_dma_param(tspi, false);
|
||||
exit_rx_dma_free:
|
||||
@@ -1143,7 +1143,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
|
||||
|
||||
free_irq(tspi->irq, tspi);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
pm_runtime_force_suspend(&pdev->dev);
|
||||
|
||||
if (tspi->tx_dma_chan)
|
||||
tegra_slink_deinit_dma_param(tspi, false);
|
||||
|
||||
@@ -1318,7 +1318,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
|
||||
exit_free_irq:
|
||||
free_irq(qspi_irq, tqspi);
|
||||
exit_pm_disable:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
pm_runtime_force_suspend(&pdev->dev);
|
||||
tegra_qspi_deinit_dma(tqspi);
|
||||
return ret;
|
||||
}
|
||||
@@ -1330,7 +1330,7 @@ static int tegra_qspi_remove(struct platform_device *pdev)
|
||||
|
||||
spi_unregister_master(master);
|
||||
free_irq(tqspi->irq, tqspi);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
pm_runtime_force_suspend(&pdev->dev);
|
||||
tegra_qspi_deinit_dma(tqspi);
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -141,7 +141,7 @@ static ssize_t tle62x0_gpio_show(struct device *dev,
|
||||
value = (st->gpio_state >> gpio_num) & 1;
|
||||
mutex_unlock(&st->lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d", value);
|
||||
return sysfs_emit(buf, "%d", value);
|
||||
}
|
||||
|
||||
static ssize_t tle62x0_gpio_store(struct device *dev,
|
||||
|
||||
@@ -285,9 +285,9 @@ static const struct attribute_group *spi_master_groups[] = {
     NULL,
 };
 
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
-                                       struct spi_transfer *xfer,
-                                       struct spi_controller *ctlr)
+static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+                                              struct spi_transfer *xfer,
+                                              struct spi_controller *ctlr)
 {
     unsigned long flags;
     int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -310,7 +310,6 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 
     spin_unlock_irqrestore(&stats->lock, flags);
 }
-EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
 
 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
  * and the sysfs version makes coldplug work too.
@@ -536,7 +535,7 @@ static DEFINE_MUTEX(board_lock);
  *
  * Return: a pointer to the new device, or NULL.
  */
-struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
+static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 {
     struct spi_device *spi;
 
@@ -561,7 +560,6 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
     device_initialize(&spi->dev);
     return spi;
 }
-EXPORT_SYMBOL_GPL(spi_alloc_device);
 
 static void spi_dev_set_name(struct spi_device *spi)
 {
@@ -599,6 +597,11 @@ static int __spi_add_device(struct spi_device *spi)
     struct device *dev = ctlr->dev.parent;
     int status;
 
+    /*
+     * We need to make sure there's no other device with this
+     * chipselect **BEFORE** we call setup(), else we'll trash
+     * its configuration.
+     */
     status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
     if (status) {
         dev_err(dev, "chipselect %d already in use\n",
@@ -651,7 +654,7 @@ static int __spi_add_device(struct spi_device *spi)
  *
  * Return: 0 on success; negative errno on failure
  */
-int spi_add_device(struct spi_device *spi)
+static int spi_add_device(struct spi_device *spi)
 {
     struct spi_controller *ctlr = spi->controller;
     struct device *dev = ctlr->dev.parent;
@@ -667,16 +670,11 @@ int spi_add_device(struct spi_device *spi)
     /* Set the bus ID string */
     spi_dev_set_name(spi);
 
-    /* We need to make sure there's no other device with this
-     * chipselect **BEFORE** we call setup(), else we'll trash
-     * its configuration. Lock against concurrent add() calls.
-     */
     mutex_lock(&ctlr->add_lock);
     status = __spi_add_device(spi);
     mutex_unlock(&ctlr->add_lock);
     return status;
 }
-EXPORT_SYMBOL_GPL(spi_add_device);
 
 static int spi_add_device_locked(struct spi_device *spi)
 {
@@ -851,6 +849,87 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 /*-------------------------------------------------------------------------*/
 
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ *                 during the processing of a spi_message while using
+ *                 spi_transfer_one
+ * @spi:     the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size:    size to alloc and return
+ * @gfp:     GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
+ */
+static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
+                           size_t size, gfp_t gfp)
+{
+    struct spi_res *sres;
+
+    sres = kzalloc(sizeof(*sres) + size, gfp);
+    if (!sres)
+        return NULL;
+
+    INIT_LIST_HEAD(&sres->entry);
+    sres->release = release;
+
+    return sres->data;
+}
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ */
+static void spi_res_free(void *res)
+{
+    struct spi_res *sres = container_of(res, struct spi_res, data);
+
+    if (!res)
+        return;
+
+    WARN_ON(!list_empty(&sres->entry));
+    kfree(sres);
+}
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res:     the spi_resource
+ */
+static void spi_res_add(struct spi_message *message, void *res)
+{
+    struct spi_res *sres = container_of(res, struct spi_res, data);
+
+    WARN_ON(!list_empty(&sres->entry));
+    list_add_tail(&sres->entry, &message->resources);
+}
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @ctlr:    the @spi_controller
+ * @message: the @spi_message
+ */
+static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
+{
+    struct spi_res *res, *tmp;
+
+    list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
+        if (res->release)
+            res->release(ctlr, message, res->data);
+
+        list_del(&res->entry);
+
+        kfree(res);
+    }
+}
+
+/*-------------------------------------------------------------------------*/
+
 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 {
     bool activate = enable;
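Since these helpers are now static, only code inside spi.c can attach life-cycle-managed resources to a message. A rough sketch of how an internal user might combine them, assuming it lives in spi.c; the example_* names and the bounce-buffer use case are illustrative, not taken from this series:

static void example_scratch_release(struct spi_controller *ctlr,
                                    struct spi_message *msg, void *res)
{
    struct example_scratch *scratch = res;

    kfree(scratch->rx_bounce);
}

static int example_attach_scratch(struct spi_device *spi,
                                  struct spi_message *msg, size_t len)
{
    struct example_scratch {
        void *rx_bounce;
    } *scratch;

    /* The release callback runs from spi_res_release() when the
     * message is finalized, so the scratch data lives exactly as
     * long as the message does. */
    scratch = spi_res_alloc(spi, example_scratch_release,
                            sizeof(*scratch), GFP_KERNEL);
    if (!scratch)
        return -ENOMEM;

    scratch->rx_bounce = kzalloc(len, GFP_KERNEL);
    if (!scratch->rx_bounce) {
        spi_res_free(scratch);
        return -ENOMEM;
    }

    spi_res_add(msg, scratch);
    return 0;
}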
@@ -3068,127 +3147,6 @@ int spi_controller_resume(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_controller_resume);
 
-static int __spi_controller_match(struct device *dev, const void *data)
-{
-    struct spi_controller *ctlr;
-    const u16 *bus_num = data;
-
-    ctlr = container_of(dev, struct spi_controller, dev);
-    return ctlr->bus_num == *bus_num;
-}
-
-/**
- * spi_busnum_to_master - look up master associated with bus_num
- * @bus_num: the master's bus number
- * Context: can sleep
- *
- * This call may be used with devices that are registered after
- * arch init time. It returns a refcounted pointer to the relevant
- * spi_controller (which the caller must release), or NULL if there is
- * no such master registered.
- *
- * Return: the SPI master structure on success, else NULL.
- */
-struct spi_controller *spi_busnum_to_master(u16 bus_num)
-{
-    struct device *dev;
-    struct spi_controller *ctlr = NULL;
-
-    dev = class_find_device(&spi_master_class, NULL, &bus_num,
-                            __spi_controller_match);
-    if (dev)
-        ctlr = container_of(dev, struct spi_controller, dev);
-    /* reference got in class_find_device */
-    return ctlr;
-}
-EXPORT_SYMBOL_GPL(spi_busnum_to_master);
-
-/*-------------------------------------------------------------------------*/
-
-/* Core methods for SPI resource management */
-
-/**
- * spi_res_alloc - allocate a spi resource that is life-cycle managed
- *                 during the processing of a spi_message while using
- *                 spi_transfer_one
- * @spi:     the spi device for which we allocate memory
- * @release: the release code to execute for this resource
- * @size:    size to alloc and return
- * @gfp:     GFP allocation flags
- *
- * Return: the pointer to the allocated data
- *
- * This may get enhanced in the future to allocate from a memory pool
- * of the @spi_device or @spi_controller to avoid repeated allocations.
- */
-void *spi_res_alloc(struct spi_device *spi,
-                    spi_res_release_t release,
-                    size_t size, gfp_t gfp)
-{
-    struct spi_res *sres;
-
-    sres = kzalloc(sizeof(*sres) + size, gfp);
-    if (!sres)
-        return NULL;
-
-    INIT_LIST_HEAD(&sres->entry);
-    sres->release = release;
-
-    return sres->data;
-}
-EXPORT_SYMBOL_GPL(spi_res_alloc);
-
-/**
- * spi_res_free - free an spi resource
- * @res: pointer to the custom data of a resource
- */
-void spi_res_free(void *res)
-{
-    struct spi_res *sres = container_of(res, struct spi_res, data);
-
-    if (!res)
-        return;
-
-    WARN_ON(!list_empty(&sres->entry));
-    kfree(sres);
-}
-EXPORT_SYMBOL_GPL(spi_res_free);
-
-/**
- * spi_res_add - add a spi_res to the spi_message
- * @message: the spi message
- * @res:     the spi_resource
- */
-void spi_res_add(struct spi_message *message, void *res)
-{
-    struct spi_res *sres = container_of(res, struct spi_res, data);
-
-    WARN_ON(!list_empty(&sres->entry));
-    list_add_tail(&sres->entry, &message->resources);
-}
-EXPORT_SYMBOL_GPL(spi_res_add);
-
-/**
- * spi_res_release - release all spi resources for this message
- * @ctlr:    the @spi_controller
- * @message: the @spi_message
- */
-void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
-{
-    struct spi_res *res, *tmp;
-
-    list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
-        if (res->release)
-            res->release(ctlr, message, res->data);
-
-        list_del(&res->entry);
-
-        kfree(res);
-    }
-}
-EXPORT_SYMBOL_GPL(spi_res_release);
-
 /*-------------------------------------------------------------------------*/
 
 /* Core methods for spi_message alterations */
@@ -3227,7 +3185,7 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr,
  * Returns: pointer to @spi_replaced_transfers,
  *          PTR_ERR(...) in case of errors.
  */
-struct spi_replaced_transfers *spi_replace_transfers(
+static struct spi_replaced_transfers *spi_replace_transfers(
     struct spi_message *msg,
     struct spi_transfer *xfer_first,
     size_t remove,
@@ -3319,7 +3277,6 @@ struct spi_replaced_transfers *spi_replace_transfers(
 
     return rxfer;
 }
-EXPORT_SYMBOL_GPL(spi_replace_transfers);
 
 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
                                         struct spi_message *msg,
@@ -3869,7 +3826,7 @@ EXPORT_SYMBOL_GPL(spi_async);
  *
  * Return: zero on success, else a negative error code.
  */
-int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 {
     struct spi_controller *ctlr = spi->controller;
     int ret;
@@ -3888,7 +3845,6 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
     return ret;
 
 }
-EXPORT_SYMBOL_GPL(spi_async_locked);
 
 /*-------------------------------------------------------------------------*/
@@ -4146,18 +4102,15 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
 
 /*-------------------------------------------------------------------------*/
 
-#if IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
 /* must call put_device() when done with returned spi_device device */
-struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
 {
     struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
 
     return dev ? to_spi_device(dev) : NULL;
 }
-EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
-#endif /* IS_ENABLED(CONFIG_OF) */
 
-#if IS_ENABLED(CONFIG_OF_DYNAMIC)
 /* the spi controllers are not using spi_bus, so we find it with another way */
 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
 {
@@ -123,6 +123,7 @@ enum pm_ioctl_id {
|
||||
IOCTL_READ_PGGS = 15,
|
||||
/* Set healthy bit value */
|
||||
IOCTL_SET_BOOT_HEALTH_STATUS = 17,
|
||||
IOCTL_OSPI_MUX_SELECT = 21,
|
||||
};
|
||||
|
||||
enum pm_query_id {
|
||||
@@ -351,6 +352,11 @@ enum zynqmp_pm_shutdown_subtype {
     ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
 };
 
+enum ospi_mux_select_type {
+    PM_OSPI_MUX_SEL_DMA = 0,
+    PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
 /**
  * struct zynqmp_pm_query_data - PM query data
  * @qid: query ID
@@ -387,6 +393,7 @@ int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
 int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
 int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
 int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
 int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
                            const enum zynqmp_pm_reset_action assert_flag);
 int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
@@ -508,6 +515,11 @@ static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
     return -ENODEV;
 }
 
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+    return -ENODEV;
+}
+
 static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
                                          const enum zynqmp_pm_reset_action assert_flag)
 {
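This firmware call is what lets the Xilinx Versal OSPI support added elsewhere in this series flip the OSPI mux between DMA and linear (memory-mapped) access before and after transfers. A hedged caller sketch, assuming a platform-provided device id; VERSAL_OSPI_EXAMPLE_DEV_ID and the example_* helpers are placeholders, not real identifiers:

#include <linux/firmware/xlnx-zynqmp.h>

#define VERSAL_OSPI_EXAMPLE_DEV_ID  0   /* placeholder, not a real ID */

static int example_switch_to_dma(void)
{
    /* Route the OSPI controller through the DMA path */
    return zynqmp_pm_ospi_mux_select(VERSAL_OSPI_EXAMPLE_DEV_ID,
                                     PM_OSPI_MUX_SEL_DMA);
}

static int example_switch_to_linear(void)
{
    /* Back to linear (memory-mapped) access */
    return zynqmp_pm_ospi_mux_select(VERSAL_OSPI_EXAMPLE_DEV_ID,
                                     PM_OSPI_MUX_SEL_LINEAR);
}

On kernels built without ZynqMP firmware support the static inline stub above simply returns -ENODEV, so such a caller fails gracefully.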
@@ -78,10 +78,6 @@ struct spi_statistics {
     unsigned long transfers_split_maxsize;
 };
 
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
-                                       struct spi_transfer *xfer,
-                                       struct spi_controller *ctlr);
-
 #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
     do { \
         unsigned long flags; \
@@ -763,8 +759,6 @@ extern int devm_spi_register_controller(struct device *dev,
                                         struct spi_controller *ctlr);
 extern void spi_unregister_controller(struct spi_controller *ctlr);
 
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
-
 /*
  * SPI resource management while processing a SPI message
  */
@@ -788,15 +782,6 @@ struct spi_res {
     unsigned long long data[]; /* guarantee ull alignment */
 };
 
-extern void *spi_res_alloc(struct spi_device *spi,
-                           spi_res_release_t release,
-                           size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
-                            struct spi_message *message);
-
 /*---------------------------------------------------------------------------*/
 
 /*
@@ -1114,8 +1099,6 @@ static inline void spi_message_free(struct spi_message *m)
 
 extern int spi_setup(struct spi_device *spi);
 extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
-                            struct spi_message *message);
 extern int spi_slave_abort(struct spi_device *spi);
 
 static inline size_t
@@ -1198,15 +1181,6 @@ struct spi_replaced_transfers {
     struct spi_transfer inserted_transfers[];
 };
 
-extern struct spi_replaced_transfers *spi_replace_transfers(
-    struct spi_message *msg,
-    struct spi_transfer *xfer_first,
-    size_t remove,
-    size_t insert,
-    spi_replaced_release_t release,
-    size_t extradatasize,
-    gfp_t gfp);
-
 /*---------------------------------------------------------------------------*/
 
 /* SPI transfer transformation methods */
@@ -1478,19 +1452,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
  * use spi_new_device() to describe each device.  You can also call
  * spi_unregister_device() to start making that device vanish, but
  * normally that would be handled by spi_unregister_controller().
- *
- * You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device.  This gives the caller
- * some more control over the spi_device structure before it is registered,
- * but requires that caller to initialize fields that would otherwise
- * be defined using the board info.
  */
-extern struct spi_device *
-spi_alloc_device(struct spi_controller *ctlr);
-
-extern int
-spi_add_device(struct spi_device *spi);
-
 extern struct spi_device *
 spi_new_device(struct spi_controller *, struct spi_board_info *);
 
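With spi_alloc_device() and spi_add_device() no longer exported, code outside spi.c instantiates devices in a single step, typically via board info, device tree, or spi_new_device(). A hypothetical board-code sketch; the chip name, speed and chip-select below are made up for illustration:

#include <linux/spi/spi.h>

static struct spi_board_info example_chip_info = {
    .modalias     = "example-chip",
    .max_speed_hz = 1000000,
    .bus_num      = 0,
    .chip_select  = 1,
    .mode         = SPI_MODE_0,
};

static struct spi_device *example_register_chip(struct spi_controller *ctlr)
{
    /* Allocates, configures and adds the device in one call;
     * returns NULL if the device could not be registered. */
    return spi_new_device(ctlr, &example_chip_info);
}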
@@ -1505,23 +1467,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
     return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
 }
 
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
-    return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
 /* Compatibility layer */
 #define spi_master spi_controller