46 files changed, 3018 insertions(+), 5988 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8b5f717b8c8c..0ac883777318 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1674,6 +1674,12 @@
 			In such case C2/C3 won't be used again.
 
 			idle=nomwait: Disable mwait for CPU C-states
 
+	idxd.sva=	[HW]
+			Format: <bool>
+			Allow force disabling of Shared Virtual Memory (SVA)
+			support for the idxd driver. By default it is set to
+			true (1).
+
 	ieee754=	[MIPS]	Select IEEE Std 754 conformance mode
 			Format: { strict | legacy | 2008 | relaxed }
 			Default: strict
diff --git a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
index 6a2043721b95..ac4d59494fc8 100644
--- a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
+++ b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
@@ -17,6 +17,8 @@ properties:
     enum:
       - ingenic,jz4740-dma
       - ingenic,jz4725b-dma
+      - ingenic,jz4760-dma
+      - ingenic,jz4760b-dma
       - ingenic,jz4770-dma
       - ingenic,jz4780-dma
       - ingenic,x1000-dma
diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
new file mode 100644
index 000000000000..a5c4be783593
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/intel,ldma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lightning Mountain centralized DMA controllers.
+
+maintainers:
+  - chuanhua.lei@intel.com
+  - mallikarjunax.reddy@intel.com
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - intel,lgm-cdma
+      - intel,lgm-dma2tx
+      - intel,lgm-dma1rx
+      - intel,lgm-dma1tx
+      - intel,lgm-dma0tx
+      - intel,lgm-dma3
+      - intel,lgm-toe-dma30
+      - intel,lgm-toe-dma31
+
+  reg:
+    maxItems: 1
+
+  "#dma-cells":
+    const: 3
+    description:
+      The first cell is the peripheral's DMA request line.
+      The second cell is the peripheral's (port) number corresponding to the channel.
+      The third cell is the burst length of the channel.
+
+  dma-channels:
+    minimum: 1
+    maximum: 16
+
+  dma-channel-mask:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    items:
+      - const: ctrl
+
+  interrupts:
+    maxItems: 1
+
+  intel,dma-poll-cnt:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      DMA descriptor polling counter is used to control the polling mechanism
+      for the descriptor fetching for all channels.
+
+  intel,dma-byte-en:
+    type: boolean
+    description:
+      DMA byte enable is only valid for DMA write (RX).
+      Byte enable (1) means the DMA write will be based on the number of dwords
+      instead of the whole burst.
+
+  intel,dma-drb:
+    type: boolean
+    description:
+      DMA descriptor read back, to ensure data and descriptor synchronization.
+
+  intel,dma-dburst-wr:
+    type: boolean
+    description:
+      Enable RX dynamic burst write. When it is enabled, the DMA does RX dynamic
+      burst; if it is disabled, the DMA RX will still support a programmable fixed
+      burst size of 2, 4, 8 or 16. It only applies to RX DMA and memcpy DMA.
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    dma0: dma-controller@e0e00000 {
+      compatible = "intel,lgm-cdma";
+      reg = <0xe0e00000 0x1000>;
+      #dma-cells = <3>;
+      dma-channels = <16>;
+      dma-channel-mask = <0xFFFF>;
+      interrupt-parent = <&ioapic1>;
+      interrupts = <82 1>;
+      resets = <&rcu0 0x30 0>;
+      reset-names = "ctrl";
+      clocks = <&cgu0 80>;
+      intel,dma-poll-cnt = <4>;
+      intel,dma-byte-en;
+      intel,dma-drb;
+    };
+  - |
+    dma3: dma-controller@ec800000 {
+      compatible = "intel,lgm-dma3";
+      reg = <0xec800000 0x1000>;
+      clocks = <&cgu0 71>;
+      resets = <&rcu0 0x10 9>;
+      #dma-cells = <3>;
+      intel,dma-poll-cnt = <16>;
+      intel,dma-byte-en;
+      intel,dma-dburst-wr;
+    };
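The new intel,ldma binding is easiest to read alongside a client node, since #dma-cells = <3> packs three values into each dmas entry. The sketch below is illustrative only: the client node, its compatible string and every cell value are assumptions made for this note, not taken from the patch.

    /* Hypothetical peripheral using the cdma controller above.
     * Cells: <&controller request-line port burst-length>.
     */
    xspi: spi@e0800000 {
        compatible = "intel,lgm-xspi";   /* assumed client, not from this patch */
        reg = <0xe0800000 0x400>;
        dmas = <&dma0 0 4 8>,            /* request line 0, port 4, burst 8 */
               <&dma0 1 4 8>;
        dma-names = "tx", "rx";
    };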
diff --git a/Documentation/devicetree/bindings/dma/owl-dma.yaml b/Documentation/devicetree/bindings/dma/owl-dma.yaml
index 256d62af2c64..93b4847554fb 100644
--- a/Documentation/devicetree/bindings/dma/owl-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/owl-dma.yaml
@@ -8,8 +8,8 @@ title: Actions Semi Owl SoCs DMA controller
 
 description: |
   The OWL DMA is a general-purpose direct memory access controller capable of
-  supporting 10 and 12 independent DMA channels for S700 and S900 SoCs
-  respectively.
+  supporting 10 independent DMA channels for the Actions Semi S700 SoC and 12
+  independent DMA channels for the S500 and S900 SoC variants.
 
 maintainers:
   - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -20,8 +20,9 @@ allOf:
 
 properties:
   compatible:
     enum:
-      - actions,s900-dma
+      - actions,s500-dma
       - actions,s700-dma
+      - actions,s900-dma
 
   reg:
     maxItems: 1
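Nothing else in the owl-dma schema changes for the new actions,s500-dma entry; per the updated description the S500 provides 12 channels, like the S900. A minimal controller node might look as follows; the unit address, register size, interrupt, request count and clock specifier are illustrative assumptions, not values from this patch.

    dma: dma-controller@b0260000 {
        compatible = "actions,s500-dma";
        reg = <0xb0260000 0xd00>;                       /* illustrative */
        interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;  /* illustrative */
        #dma-cells = <1>;
        dma-channels = <12>;                            /* 12 channels on S500 */
        dma-requests = <46>;                            /* illustrative */
        clocks = <&cmu CLK_DMAC>;                       /* illustrative */
    };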
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
index c07eb6f2fc8d..7f2a54bc732d 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -14,34 +14,37 @@ allOf:
 
 properties:
   compatible:
-    items:
-      - enum:
-          - renesas,dmac-r8a7742  # RZ/G1H
-          - renesas,dmac-r8a7743  # RZ/G1M
-          - renesas,dmac-r8a7744  # RZ/G1N
-          - renesas,dmac-r8a7745  # RZ/G1E
-          - renesas,dmac-r8a77470 # RZ/G1C
-          - renesas,dmac-r8a774a1 # RZ/G2M
-          - renesas,dmac-r8a774b1 # RZ/G2N
-          - renesas,dmac-r8a774c0 # RZ/G2E
-          - renesas,dmac-r8a774e1 # RZ/G2H
-          - renesas,dmac-r8a7790  # R-Car H2
-          - renesas,dmac-r8a7791  # R-Car M2-W
-          - renesas,dmac-r8a7792  # R-Car V2H
-          - renesas,dmac-r8a7793  # R-Car M2-N
-          - renesas,dmac-r8a7794  # R-Car E2
-          - renesas,dmac-r8a7795  # R-Car H3
-          - renesas,dmac-r8a7796  # R-Car M3-W
-          - renesas,dmac-r8a77961 # R-Car M3-W+
-          - renesas,dmac-r8a77965 # R-Car M3-N
-          - renesas,dmac-r8a77970 # R-Car V3M
-          - renesas,dmac-r8a77980 # R-Car V3H
-          - renesas,dmac-r8a77990 # R-Car E3
-          - renesas,dmac-r8a77995 # R-Car D3
-      - const: renesas,rcar-dmac
-
-  reg:
-    maxItems: 1
+    oneOf:
+      - items:
+          - enum:
+              - renesas,dmac-r8a7742  # RZ/G1H
+              - renesas,dmac-r8a7743  # RZ/G1M
+              - renesas,dmac-r8a7744  # RZ/G1N
+              - renesas,dmac-r8a7745  # RZ/G1E
+              - renesas,dmac-r8a77470 # RZ/G1C
+              - renesas,dmac-r8a774a1 # RZ/G2M
+              - renesas,dmac-r8a774b1 # RZ/G2N
+              - renesas,dmac-r8a774c0 # RZ/G2E
+              - renesas,dmac-r8a774e1 # RZ/G2H
+              - renesas,dmac-r8a7790  # R-Car H2
+              - renesas,dmac-r8a7791  # R-Car M2-W
+              - renesas,dmac-r8a7792  # R-Car V2H
+              - renesas,dmac-r8a7793  # R-Car M2-N
+              - renesas,dmac-r8a7794  # R-Car E2
+              - renesas,dmac-r8a7795  # R-Car H3
+              - renesas,dmac-r8a7796  # R-Car M3-W
+              - renesas,dmac-r8a77961 # R-Car M3-W+
+              - renesas,dmac-r8a77965 # R-Car M3-N
+              - renesas,dmac-r8a77970 # R-Car V3M
+              - renesas,dmac-r8a77980 # R-Car V3H
+              - renesas,dmac-r8a77990 # R-Car E3
+              - renesas,dmac-r8a77995 # R-Car D3
+          - const: renesas,rcar-dmac
+
+      - items:
+          - const: renesas,dmac-r8a779a0 # R-Car V3U
+
+  reg: true
 
   interrupts:
     minItems: 9
@@ -110,6 +113,23 @@ required:
   - power-domains
   - resets
 
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - renesas,dmac-r8a779a0
+then:
+  properties:
+    reg:
+      items:
+        - description: Base register block
+        - description: Channel register block
+else:
+  properties:
+    reg:
+      maxItems: 1
+
 additionalProperties: false
 
 examples:
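The oneOf/if/then rework keeps all variants in one schema: existing SoCs still pair their SoC-specific string with the renesas,rcar-dmac fallback and a single reg region, while the new R-Car V3U entry stands alone and must supply two regions. A hypothetical V3U node follows; the addresses and sizes are assumptions for illustration, not from this patch.

    dmac1: dma-controller@e7350000 {
        compatible = "renesas,dmac-r8a779a0";
        reg = <0xe7350000 0x1000>,   /* Base register block */
              <0xe7300000 0x10000>;  /* Channel register block */
        /* interrupts, clocks, power-domains and resets as on other
         * R-Car DMACs; omitted here for brevity. */
    };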
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
deleted file mode 100644
index ccd52d6a231a..000000000000
--- a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* CSR SiRFSoC DMA controller
-
-See dma.txt first
-
-Required properties:
-- compatible: Should be "sirf,prima2-dmac", "sirf,atlas7-dmac" or
-  "sirf,atlas7-dmac-v2"
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain one interrupt shared by all channel
-- #dma-cells: must be <1>. used to represent the number of integer
-  cells in the dmas property of client device.
-- clocks: clock required
-
-Example:
-
-Controller:
-dmac0: dma-controller@b00b0000 {
-	compatible = "sirf,prima2-dmac";
-	reg = <0xb00b0000 0x10000>;
-	interrupts = <12>;
-	clocks = <&clks 24>;
-	#dma-cells = <1>;
-};
-
-
-Client:
-Fill the specific dma request line in dmas. In the below example, spi0 read
-channel request line is 9 of the 2nd dma controller, while write channel uses
-4 of the 2nd dma controller; spi1 read channel request line is 12 of the 1st
-dma controller, while write channel uses 13 of the 1st dma controller:
-
-spi0: spi@b00d0000 {
-	compatible = "sirf,prima2-spi";
-	dmas = <&dmac1 9>,
-	       <&dmac1 4>;
-	dma-names = "rx", "tx";
-};
-
-spi1: spi@b0170000 {
-	compatible = "sirf,prima2-spi";
-	dmas = <&dmac0 12>,
-	       <&dmac0 13>;
-	dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
deleted file mode 100644
index dbe160400adc..000000000000
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Synopsys DesignWare AXI DMA Controller
-
-Required properties:
-- compatible: "snps,axi-dma-1.01a"
-- reg: Address range of the DMAC registers. This should include
-  all of the per-channel registers.
-- interrupt: Should contain the DMAC interrupt number.
-- dma-channels: Number of channels supported by hardware.
-- snps,dma-masters: Number of AXI masters supported by the hardware.
-- snps,data-width: Maximum AXI data width supported by hardware.
-  (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
-- snps,priority: Priority of channel. Array size is equal to the number of
-  dma-channels. Priority value must be programmed within [0:dma-channels-1]
-  range. (0 - minimum priority)
-- snps,block-size: Maximum block size supported by the controller channel.
-  Array size is equal to the number of dma-channels.
-
-Optional properties:
-- snps,axi-max-burst-len: Restrict master AXI burst length by value specified
-  in this property. If this property is missing the maximum AXI burst length
-  supported by DMAC is used. [1:256]
-
-Example:
-
-dmac: dma-controller@80000 {
-	compatible = "snps,axi-dma-1.01a";
-	reg = <0x80000 0x400>;
-	clocks = <&core_clk>, <&cfgr_clk>;
-	clock-names = "core-clk", "cfgr-clk";
-	interrupt-parent = <&intc>;
-	interrupts = <27>;
-
-	dma-channels = <4>;
-	snps,dma-masters = <2>;
-	snps,data-width = <3>;
-	snps,block-size = <4096 4096 4096 4096>;
-	snps,priority = <0 1 2 3>;
-	snps,axi-max-burst-len = <16>;
-};
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
new file mode 100644
index 000000000000..79e241498e25
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/snps,dw-axi-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare AXI DMA Controller
+
+maintainers:
+  - Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+  - Jee Heng Sia <jee.heng.sia@intel.com>
+
+description:
+  Synopsys DesignWare AXI DMA Controller DT Binding
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - snps,axi-dma-1.01a
+      - intel,kmb-axi-dma
+
+  reg:
+    minItems: 1
+    items:
+      - description: Address range of the DMAC registers
+      - description: Address range of the DMAC APB registers
+
+  reg-names:
+    items:
+      - const: axidma_ctrl_regs
+      - const: axidma_apb_regs
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+
+  clock-names:
+    items:
+      - const: core-clk
+      - const: cfgr-clk
+
+  '#dma-cells':
+    const: 1
+
+  dma-channels:
+    minimum: 1
+    maximum: 8
+
+  snps,dma-masters:
+    description: |
+      Number of AXI masters supported by the hardware.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [1, 2]
+
+  snps,data-width:
+    description: |
+      AXI data width supported by hardware.
+      (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [0, 1, 2, 3, 4, 5, 6]
+
+  snps,priority:
+    description: |
+      Channel priority specifier associated with the DMA channels.
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    maxItems: 8
+
+  snps,block-size:
+    description: |
+      Channel block size specifier associated with the DMA channels.
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    maxItems: 8
+
+  snps,axi-max-burst-len:
+    description: |
+      Restrict master AXI burst length by the value specified in this property.
+      If this property is missing, the maximum AXI burst length supported by
+      the DMAC is used.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 1
+    maximum: 256
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - interrupts
+  - '#dma-cells'
+  - dma-channels
+  - snps,dma-masters
+  - snps,data-width
+  - snps,priority
+  - snps,block-size
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    /* example with snps,dw-axi-dmac */
+    dmac: dma-controller@80000 {
+      compatible = "snps,axi-dma-1.01a";
+      reg = <0x80000 0x400>;
+      clocks = <&core_clk>, <&cfgr_clk>;
+      clock-names = "core-clk", "cfgr-clk";
+      interrupt-parent = <&intc>;
+      interrupts = <27>;
+      #dma-cells = <1>;
+      dma-channels = <4>;
+      snps,dma-masters = <2>;
+      snps,data-width = <3>;
+      snps,block-size = <4096 4096 4096 4096>;
+      snps,priority = <0 1 2 3>;
+      snps,axi-max-burst-len = <16>;
+    };
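The in-file example above covers only snps,axi-dma-1.01a, where the optional APB register range is absent. The second compatible, intel,kmb-axi-dma, is the case that exercises the two-entry reg plus reg-names. Below is a hypothetical Keem Bay style node; all addresses, interrupt numbers and channel parameters are assumptions for illustration, not from this patch.

    axi_dma: dma-controller@28000000 {
        compatible = "intel,kmb-axi-dma";
        reg = <0x28000000 0x400>,          /* axidma_ctrl_regs */
              <0x28010000 0x100>;          /* axidma_apb_regs */
        reg-names = "axidma_ctrl_regs", "axidma_apb_regs";
        clocks = <&clk_core>, <&clk_cfg>;  /* illustrative phandles */
        clock-names = "core-clk", "cfgr-clk";
        interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>; /* illustrative */
        #dma-cells = <1>;
        dma-channels = <8>;
        snps,dma-masters = <1>;
        snps,data-width = <2>;             /* 32 bits */
        snps,block-size = <1024 1024 1024 1024 1024 1024 1024 1024>;
        snps,priority = <0 1 2 3 4 5 6 7>;
    };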
diff --git a/Documentation/devicetree/bindings/dma/ste-coh901318.txt b/Documentation/devicetree/bindings/dma/ste-coh901318.txt
deleted file mode 100644
index 091ad057e9cf..000000000000
--- a/Documentation/devicetree/bindings/dma/ste-coh901318.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-ST-Ericsson COH 901 318 DMA Controller
-
-This is a DMA controller which has begun as a fork of the
-ARM PL08x PrimeCell VHDL code.
-
-Required properties:
-- compatible: should be "stericsson,coh901318"
-- reg: register locations and length
-- interrupts: the single DMA IRQ
-- #dma-cells: must be set to <1>, as the channels on the
-  COH 901 318 are simple and identified by a single number
-- dma-channels: the number of DMA channels handled
-
-Example:
-
-dmac: dma-controller@c00020000 {
-	compatible = "stericsson,coh901318";
-	reg = <0xc0020000 0x1000>;
-	interrupt-parent = <&vica>;
-	interrupts = <2>;
-	#dma-cells = <1>;
-	dma-channels = <40>;
-};
-
-Consumers example:
-
-uart0: serial@c0013000 {
-	compatible = "...";
-	(...)
-	dmas = <&dmac 17 &dmac 18>;
-	dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
deleted file mode 100644
index 0ab80f69e566..000000000000
--- a/Documentation/devicetree/bindings/dma/zxdma.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* ZTE ZX296702 DMA controller
-
-Required properties:
-- compatible: Should be "zte,zx296702-dma"
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain one interrupt shared by all channel
-- #dma-cells: see dma.txt, should be 1, para number
-- dma-channels: physical channels supported
-- dma-requests: virtual channels supported, each virtual channel
-  have specific request line
-- clocks: clock required
-
-Example:
-
-Controller:
-	dma: dma-controller@09c00000{
-		compatible = "zte,zx296702-dma";
-		reg = <0x09c00000 0x1000>;
-		clocks = <&topclk ZX296702_DMA_ACLK>;
-		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
-		#dma-cells = <1>;
-		dma-channels = <24>;
-		dma-requests = <24>;
-	};
-
-Client:
-Use specific request line passing from dmax
-For example, spdif0 tx channel request line is 4
-	spdif0: spdif0@b004000 {
-		#sound-dai-cells = <0>;
-		compatible = "zte,zx296702-spdif";
-		reg = <0x0b004000 0x1000>;
-		clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
-		clock-names = "tx";
-		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-		dmas = <&dma 4>;
-		dma-names = "tx";
-	}
diff --git a/MAINTAINERS b/MAINTAINERS
index dace4bd105d5..1d75afad615f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2828,9 +2828,7 @@ S:	Odd fixes
 W:	http://sourceforge.net/projects/xscaleiop
 F:	Documentation/crypto/async-tx-api.rst
 F:	crypto/async_tx/
-F:	drivers/dma/
 F:	include/linux/async_tx.h
-F:	include/linux/dmaengine.h
 
 AT24 EEPROM DRIVER
 M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
@@ -5271,6 +5269,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
 F:	Documentation/devicetree/bindings/dma/
 F:	Documentation/driver-api/dmaengine/
 F:	drivers/dma/
+F:	include/linux/dma/
 F:	include/linux/dmaengine.h
 F:	include/linux/of_dma.h
 
@@ -11614,7 +11613,6 @@ F:	drivers/dma/at_hdmac.c
 F:	drivers/dma/at_hdmac_regs.h
 F:	drivers/dma/at_xdmac.c
 F:	include/dt-bindings/dma/at91.h
-F:	include/linux/platform_data/dma-atmel.h
 
 MICROCHIP AT91 SERIAL DRIVER
 M:	Richard Genoud <richard.genoud@gmail.com>
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d242c7632621..0c2827fd8c19 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,13 +124,6 @@ config BCM_SBA_RAID
 	  has the capability to offload memcpy, xor and pq computation
 	  for raid5/6.
 
-config COH901318
-	bool "ST-Ericsson COH901318 DMA support"
-	select DMA_ENGINE
-	depends on ARCH_U300 || COMPILE_TEST
-	help
-	  Enable support for ST-Ericsson COH 901 318 DMA.
-
 config DMA_BCM2835
 	tristate "BCM2835 DMA engine support"
 	depends on ARCH_BCM2835
@@ -179,6 +172,7 @@ config DMA_SUN6I
 config DW_AXI_DMAC
 	tristate "Synopsys DesignWare AXI DMA support"
 	depends on OF || COMPILE_TEST
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -378,14 +372,14 @@ config MILBEAUT_XDMAC
 	  XDMAC device.
 
 config MMP_PDMA
-	bool "MMP PDMA support"
+	tristate "MMP PDMA support"
 	depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
 	select DMA_ENGINE
 	help
 	  Support the MMP PDMA engine for PXA and MMP platform.
 
 config MMP_TDMA
-	bool "MMP Two-Channel DMA support"
+	tristate "MMP Two-Channel DMA support"
 	depends on ARCH_MMP || COMPILE_TEST
 	select DMA_ENGINE
 	select GENERIC_ALLOCATOR
@@ -519,13 +513,6 @@ config PLX_DMA
 	  These are exposed via extra functions on the switch's
 	  upstream port. Each function exposes one DMA channel.
 
-config SIRF_DMA
-	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
-	depends on ARCH_SIRF
-	select DMA_ENGINE
-	help
-	  Enable support for the CSR SiRFprimaII DMA engine.
-
 config STE_DMA40
 	bool "ST-Ericsson DMA40 support"
 	depends on ARCH_U8500
@@ -710,15 +697,6 @@ config XILINX_ZYNQMP_DPDMA
 	  driver provides the dmaengine required by the DisplayPort subsystem
 	  display driver.
 
-config ZX_DMA
-	tristate "ZTE ZX DMA support"
-	depends on ARCH_ZX || COMPILE_TEST
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine for ZTE ZX family platform devices.
-
-
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
 
@@ -740,6 +718,8 @@ source "drivers/dma/ti/Kconfig"
 
 source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
 
+source "drivers/dma/lgm/Kconfig"
+
 # clients
 comment "DMA Clients"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 948a8da05f8b..aa69094e3547 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
 obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
-obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
@@ -65,7 +64,6 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_PXA_DMA) += pxa_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_SF_PDMA) += sf-pdma/
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_STM32_DMA) += stm32-dma.o
 obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
@@ -79,9 +77,9 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
 obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
+obj-$(CONFIG_INTEL_LDMA) += lgm/
 
 obj-y += mediatek/
 obj-y += qcom/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7eaee5b705b1..30ae36124b1d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -54,6 +54,25 @@ module_param(init_nr_desc_per_channel, uint, 0644);
 MODULE_PARM_DESC(init_nr_desc_per_channel,
 		 "initial descriptors per channel (default: 64)");
 
+/**
+ * struct at_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @cap_mask: dma_capability flags supported by the platform
+ */
+struct at_dma_platform_data {
+	unsigned int	nr_channels;
+	dma_cap_mask_t	cap_mask;
+};
+
+/**
+ * struct at_dma_slave - Controller-specific information about a slave
+ * @dma_dev: required DMA master device
+ * @cfg: Platform-specific initializer for the CFG register
+ */
+struct at_dma_slave {
+	struct device		*dma_dev;
+	u32			cfg;
+};
 
 /* prototypes */
 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 80fc2fe8c77e..4d1ebc040031 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -7,8 +7,6 @@
 #ifndef AT_HDMAC_REGS_H
 #define AT_HDMAC_REGS_H
 
-#include <linux/platform_data/dma-atmel.h>
-
 #define	AT_DMA_MAX_NR_CHANNELS	8
 
@@ -148,7 +146,31 @@
 #define	ATC_AUTO	(0x1 << 31)	/* Auto multiple buffer tx enable */
 
 /* Bitfields in CFG */
-/* are in at_hdmac.h */
+#define ATC_PER_MSB(h)	((0x30U & (h)) >> 4)	/* Extract most significant bits of a handshaking identifier */
+
+#define	ATC_SRC_PER(h)		(0xFU & (h))	/* Channel src rq associated with periph handshaking ifc h */
+#define	ATC_DST_PER(h)		((0xFU & (h)) << 4)	/*
Channel dst rq associated with periph handshaking ifc h */ +#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ +#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ +#define ATC_SRC_H2SEL_SW (0x0 << 9) +#define ATC_SRC_H2SEL_HW (0x1 << 9) +#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ +#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ +#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ +#define ATC_DST_H2SEL_SW (0x0 << 13) +#define ATC_DST_H2SEL_HW (0x1 << 13) +#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ +#define ATC_SOD (0x1 << 16) /* Stop On Done */ +#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ +#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ +#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ +#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) +#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) +#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ +#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ +#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) +#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) +#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) /* Bitfields in SPIP */ #define ATC_SPIP_HOLE(x) (0xFFFFU & (x)) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c deleted file mode 100644 index 95b9b2f5358e..000000000000 --- a/drivers/dma/coh901318.c +++ /dev/null @@ -1,2808 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * driver/dma/coh901318.c - * - * Copyright (C) 2007-2009 ST-Ericsson - * DMA driver for COH 901 318 - * Author: Per Friden <per.friden@stericsson.com> - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> /* printk() */ -#include <linux/fs.h> /* everything... 
*/ -#include <linux/scatterlist.h> -#include <linux/slab.h> /* kmalloc() */ -#include <linux/dmaengine.h> -#include <linux/platform_device.h> -#include <linux/device.h> -#include <linux/irqreturn.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/uaccess.h> -#include <linux/debugfs.h> -#include <linux/platform_data/dma-coh901318.h> -#include <linux/of_dma.h> - -#include "coh901318.h" -#include "dmaengine.h" - -#define COH901318_MOD32_MASK (0x1F) -#define COH901318_WORD_MASK (0xFFFFFFFF) -/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */ -#define COH901318_INT_STATUS1 (0x0000) -#define COH901318_INT_STATUS2 (0x0004) -/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */ -#define COH901318_TC_INT_STATUS1 (0x0008) -#define COH901318_TC_INT_STATUS2 (0x000C) -/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */ -#define COH901318_TC_INT_CLEAR1 (0x0010) -#define COH901318_TC_INT_CLEAR2 (0x0014) -/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */ -#define COH901318_RAW_TC_INT_STATUS1 (0x0018) -#define COH901318_RAW_TC_INT_STATUS2 (0x001C) -/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */ -#define COH901318_BE_INT_STATUS1 (0x0020) -#define COH901318_BE_INT_STATUS2 (0x0024) -/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */ -#define COH901318_BE_INT_CLEAR1 (0x0028) -#define COH901318_BE_INT_CLEAR2 (0x002C) -/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */ -#define COH901318_RAW_BE_INT_STATUS1 (0x0030) -#define COH901318_RAW_BE_INT_STATUS2 (0x0034) - -/* - * CX_CFG - Channel Configuration Registers 32bit (R/W) - */ -#define COH901318_CX_CFG (0x0100) -#define COH901318_CX_CFG_SPACING (0x04) -/* Channel enable activates tha dma job */ -#define COH901318_CX_CFG_CH_ENABLE (0x00000001) -#define COH901318_CX_CFG_CH_DISABLE (0x00000000) -/* Request Mode */ -#define COH901318_CX_CFG_RM_MASK (0x00000006) -#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1) -#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1) -#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1) -#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1) -#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1) -/* Linked channel request field. 
RM must == 11 */ -#define COH901318_CX_CFG_LCRF_SHIFT 3 -#define COH901318_CX_CFG_LCRF_MASK (0x000001F8) -#define COH901318_CX_CFG_LCR_DISABLE (0x00000000) -/* Terminal Counter Interrupt Request Mask */ -#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200) -#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000) -/* Bus Error interrupt Mask */ -#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400) -#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000) - -/* - * CX_STAT - Channel Status Registers 32bit (R/-) - */ -#define COH901318_CX_STAT (0x0200) -#define COH901318_CX_STAT_SPACING (0x04) -#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008) -#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004) -#define COH901318_CX_STAT_ACTIVE (0x00000002) -#define COH901318_CX_STAT_ENABLED (0x00000001) - -/* - * CX_CTRL - Channel Control Registers 32bit (R/W) - */ -#define COH901318_CX_CTRL (0x0400) -#define COH901318_CX_CTRL_SPACING (0x10) -/* Transfer Count Enable */ -#define COH901318_CX_CTRL_TC_ENABLE (0x00001000) -#define COH901318_CX_CTRL_TC_DISABLE (0x00000000) -/* Transfer Count Value 0 - 4095 */ -#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF) -/* Burst count */ -#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000) -#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13) -#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13) -/* Source bus size */ -#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000) -#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16) -#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16) -#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16) -/* Source address increment */ -#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000) -#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000) -/* Destination Bus Size */ -#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000) -#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19) -#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19) -#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19) -/* Destination address increment */ -#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000) -#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000) -/* Master Mode (Master2 is only connected to MSL) */ -#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000) -#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22) -#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22) -#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22) -#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22) -/* Terminal Count flag to PER enable */ -#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000) -#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000) -/* Terminal Count flags to CPU enable */ -#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000) -#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000) -/* Hand shake to peripheral */ -#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000) -#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000) -#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000) -#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000) -/* DMA mode */ -#define COH901318_CX_CTRL_DDMA_MASK (0x30000000) -#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28) -#define 
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28) -#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28) -/* Primary Request Data Destination */ -#define COH901318_CX_CTRL_PRDD_MASK (0x40000000) -#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30) -#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30) - -/* - * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W) - */ -#define COH901318_CX_SRC_ADDR (0x0404) -#define COH901318_CX_SRC_ADDR_SPACING (0x10) - -/* - * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W - */ -#define COH901318_CX_DST_ADDR (0x0408) -#define COH901318_CX_DST_ADDR_SPACING (0x10) - -/* - * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W) - */ -#define COH901318_CX_LNK_ADDR (0x040C) -#define COH901318_CX_LNK_ADDR_SPACING (0x10) -#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001) - -/** - * struct coh901318_params - parameters for DMAC configuration - * @config: DMA config register - * @ctrl_lli_last: DMA control register for the last lli in the list - * @ctrl_lli: DMA control register for an lli - * @ctrl_lli_chained: DMA control register for a chained lli - */ -struct coh901318_params { - u32 config; - u32 ctrl_lli_last; - u32 ctrl_lli; - u32 ctrl_lli_chained; -}; - -/** - * struct coh_dma_channel - dma channel base - * @name: ascii name of dma channel - * @number: channel id number - * @desc_nbr_max: number of preallocated descriptors - * @priority_high: prio of channel, 0 low otherwise high. - * @param: configuration parameters - */ -struct coh_dma_channel { - const char name[32]; - const int number; - const int desc_nbr_max; - const int priority_high; - const struct coh901318_params param; -}; - -/** - * struct powersave - DMA power save structure - * @lock: lock protecting data in this struct - * @started_channels: bit mask indicating active dma channels - */ -struct powersave { - spinlock_t lock; - u64 started_channels; -}; - -/* points out all dma slave channels. - * Syntax is [A1, B1, A2, B2, .... ,-1,-1] - * Select all channels from A to B, end of list is marked with -1,-1 - */ -static int dma_slave_channels[] = { - U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, - U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1}; - -/* points out all dma memcpy channels. 
*/ -static int dma_memcpy_channels[] = { - U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1}; - -#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \ - COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \ - COH901318_CX_CFG_LCR_DISABLE | \ - COH901318_CX_CFG_TC_IRQ_ENABLE | \ - COH901318_CX_CFG_BE_IRQ_ENABLE) -#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \ - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_MASTER_MODE_M1RW | \ - COH901318_CX_CTRL_TCP_DISABLE | \ - COH901318_CX_CTRL_TC_IRQ_DISABLE | \ - COH901318_CX_CTRL_HSP_DISABLE | \ - COH901318_CX_CTRL_HSS_DISABLE | \ - COH901318_CX_CTRL_DDMA_LEGACY | \ - COH901318_CX_CTRL_PRDD_SOURCE) -#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \ - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_MASTER_MODE_M1RW | \ - COH901318_CX_CTRL_TCP_DISABLE | \ - COH901318_CX_CTRL_TC_IRQ_DISABLE | \ - COH901318_CX_CTRL_HSP_DISABLE | \ - COH901318_CX_CTRL_HSS_DISABLE | \ - COH901318_CX_CTRL_DDMA_LEGACY | \ - COH901318_CX_CTRL_PRDD_SOURCE) -#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \ - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ - COH901318_CX_CTRL_MASTER_MODE_M1RW | \ - COH901318_CX_CTRL_TCP_DISABLE | \ - COH901318_CX_CTRL_TC_IRQ_ENABLE | \ - COH901318_CX_CTRL_HSP_DISABLE | \ - COH901318_CX_CTRL_HSS_DISABLE | \ - COH901318_CX_CTRL_DDMA_LEGACY | \ - COH901318_CX_CTRL_PRDD_SOURCE) - -static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { - { - .number = U300_DMA_MSL_TX_0, - .name = "MSL TX 0", - .priority_high = 0, - }, - { - .number = U300_DMA_MSL_TX_1, - .name = "MSL TX 1", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - 
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - }, - { - .number = U300_DMA_MSL_TX_2, - .name = "MSL TX 2", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .desc_nbr_max = 10, - }, - { - .number = U300_DMA_MSL_TX_3, - .name = "MSL TX 3", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - 
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - }, - { - .number = U300_DMA_MSL_TX_4, - .name = "MSL TX 4", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - }, - { - .number = U300_DMA_MSL_TX_5, - .name = "MSL TX 5", - .priority_high = 0, - }, - { - .number = U300_DMA_MSL_TX_6, - .name = "MSL TX 6", - .priority_high = 0, - }, - { - .number = U300_DMA_MSL_RX_0, - .name = "MSL RX 0", - .priority_high = 0, - }, - { - .number = U300_DMA_MSL_RX_1, - .name = "MSL RX 1", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - 
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_MSL_RX_2, - .name = "MSL RX 2", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_MSL_RX_3, - .name = "MSL RX 3", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - 
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_MSL_RX_4, - .name = "MSL RX 4", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_MSL_RX_5, - .name = "MSL RX 5", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - 
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_MSL_RX_6, - .name = "MSL RX 6", - .priority_high = 0, - }, - /* - * Don't set up device address, burst count or size of src - * or dst bus for this peripheral - handled by PrimeCell - * DMA extension. - */ - { - .number = U300_DMA_MMCSD_RX_TX, - .name = "MMCSD RX TX", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - - }, - { - .number = U300_DMA_MSPRO_TX, - .name = "MSPRO TX", - .priority_high = 0, - }, - { - .number = U300_DMA_MSPRO_RX, - .name = "MSPRO RX", - .priority_high = 0, - }, - /* - * Don't set up device address, burst count or size of src - * or dst bus for this peripheral - handled by PrimeCell - * DMA extension. 
- */ - { - .number = U300_DMA_UART0_TX, - .name = "UART0 TX", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - }, - { - .number = U300_DMA_UART0_RX, - .name = "UART0 RX", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - }, - { - .number = U300_DMA_APEX_TX, - .name = "APEX TX", - .priority_high = 0, - }, - { - .number = U300_DMA_APEX_RX, - .name = "APEX RX", - .priority_high = 0, - }, - { - .number = U300_DMA_PCM_I2S0_TX, - .name = "PCM I2S0 TX", - .priority_high = 1, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli_last = 0 | - 
COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - }, - { - .number = U300_DMA_PCM_I2S0_RX, - .name = "PCM I2S0 RX", - .priority_high = 1, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_PCM_I2S1_TX, - .name = "PCM I2S1 TX", - .priority_high = 1, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - 
.param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_SOURCE, - }, - { - .number = U300_DMA_PCM_I2S1_RX, - .name = "PCM I2S1 RX", - .priority_high = 1, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_BURST_COUNT_16_BYTES | - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_ENABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY | - COH901318_CX_CTRL_PRDD_DEST, - }, - { - .number = U300_DMA_XGAM_CDI, - .name = "XGAM CDI", - .priority_high = 0, - }, - { - .number = U300_DMA_XGAM_PDI, - .name = "XGAM PDI", - .priority_high = 0, - }, - /* - * Don't set up device address, burst count or size of src - * or dst bus for this peripheral - handled by PrimeCell - * DMA extension. 
- */ - { - .number = U300_DMA_SPI_TX, - .name = "SPI TX", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - }, - { - .number = U300_DMA_SPI_RX, - .name = "SPI RX", - .priority_high = 0, - .param.config = COH901318_CX_CFG_CH_DISABLE | - COH901318_CX_CFG_LCR_DISABLE | - COH901318_CX_CFG_TC_IRQ_ENABLE | - COH901318_CX_CFG_BE_IRQ_ENABLE, - .param.ctrl_lli_chained = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_DISABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - .param.ctrl_lli_last = 0 | - COH901318_CX_CTRL_TC_ENABLE | - COH901318_CX_CTRL_MASTER_MODE_M1RW | - COH901318_CX_CTRL_TCP_DISABLE | - COH901318_CX_CTRL_TC_IRQ_ENABLE | - COH901318_CX_CTRL_HSP_ENABLE | - COH901318_CX_CTRL_HSS_DISABLE | - COH901318_CX_CTRL_DDMA_LEGACY, - - }, - { - .number = U300_DMA_GENERAL_PURPOSE_0, - .name = "GENERAL 00", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_1, - .name = "GENERAL 01", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_2, - .name = "GENERAL 02", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_3, - .name = "GENERAL 03", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_4, - .name = "GENERAL 04", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_5, - .name = "GENERAL 05", - .priority_high = 0, 
- - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_6, - .name = "GENERAL 06", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_7, - .name = "GENERAL 07", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_GENERAL_PURPOSE_8, - .name = "GENERAL 08", - .priority_high = 0, - - .param.config = flags_memcpy_config, - .param.ctrl_lli_chained = flags_memcpy_lli_chained, - .param.ctrl_lli = flags_memcpy_lli, - .param.ctrl_lli_last = flags_memcpy_lli_last, - }, - { - .number = U300_DMA_UART1_TX, - .name = "UART1 TX", - .priority_high = 0, - }, - { - .number = U300_DMA_UART1_RX, - .name = "UART1 RX", - .priority_high = 0, - } -}; - -#define COHC_2_DEV(cohc) (&cohc->chan.dev->device) - -#ifdef VERBOSE_DEBUG -#define COH_DBG(x) ({ if (1) x; 0; }) -#else -#define COH_DBG(x) ({ if (0) x; 0; }) -#endif - -struct coh901318_desc { - struct dma_async_tx_descriptor desc; - struct list_head node; - struct scatterlist *sg; - unsigned int sg_len; - struct coh901318_lli *lli; - enum dma_transfer_direction dir; - unsigned long flags; - u32 head_config; - u32 head_ctrl; -}; - -struct coh901318_base { - struct device *dev; - void __iomem *virtbase; - unsigned int irq; - struct coh901318_pool pool; - struct powersave pm; - struct dma_device dma_slave; - struct dma_device dma_memcpy; - struct coh901318_chan *chans; -}; - -struct coh901318_chan { - spinlock_t lock; - int allocated; - int id; - int stopped; - - struct work_struct free_work; - struct dma_chan chan; - - struct tasklet_struct tasklet; - - struct list_head active; - struct list_head queue; - struct list_head free; - - unsigned long nbr_active_done; - unsigned long busy; - - struct dma_slave_config config; - u32 addr; - u32 ctrl; - - struct coh901318_base *base; -}; - -static void coh901318_list_print(struct coh901318_chan *cohc, - struct coh901318_lli *lli) -{ - struct coh901318_lli *l = lli; - int i = 0; - - while (l) { - dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad" - ", dst %pad, link %pad virt_link_addr 0x%p\n", - i, l, l->control, &l->src_addr, &l->dst_addr, - &l->link_addr, l->virt_link_addr); - i++; - l = l->virt_link_addr; - } -} - -#ifdef CONFIG_DEBUG_FS - -#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y) - -static struct coh901318_base *debugfs_dma_base; -static struct dentry *dma_dentry; - -static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *f_pos) -{ - u64 started_channels = debugfs_dma_base->pm.started_channels; - int pool_count = debugfs_dma_base->pool.debugfs_pool_counter; - char *dev_buf; - char *tmp; - int ret; - int i; - - dev_buf = kmalloc(4*1024, GFP_KERNEL); - if (dev_buf == NULL) - return -ENOMEM; - tmp = dev_buf; - - tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); - - for (i = 0; i < U300_DMA_CHANNELS; i++) { - if (started_channels & (1ULL << i)) - tmp += sprintf(tmp, "channel %d\n", i); - } - - tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count); - - ret = simple_read_from_buffer(buf, count, f_pos, dev_buf, - tmp - 
dev_buf); - kfree(dev_buf); - return ret; -} - -static const struct file_operations coh901318_debugfs_status_operations = { - .open = simple_open, - .read = coh901318_debugfs_read, - .llseek = default_llseek, -}; - - -static int __init init_coh901318_debugfs(void) -{ - - dma_dentry = debugfs_create_dir("dma", NULL); - - debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL, - &coh901318_debugfs_status_operations); - return 0; -} - -static void __exit exit_coh901318_debugfs(void) -{ - debugfs_remove_recursive(dma_dentry); -} - -module_init(init_coh901318_debugfs); -module_exit(exit_coh901318_debugfs); -#else - -#define COH901318_DEBUGFS_ASSIGN(x, y) - -#endif /* CONFIG_DEBUG_FS */ - -static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) -{ - return container_of(chan, struct coh901318_chan, chan); -} - -static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, - struct dma_slave_config *config, - enum dma_transfer_direction direction); - -static inline const struct coh901318_params * -cohc_chan_param(struct coh901318_chan *cohc) -{ - return &chan_config[cohc->id].param; -} - -static inline const struct coh_dma_channel * -cohc_chan_conf(struct coh901318_chan *cohc) -{ - return &chan_config[cohc->id]; -} - -static void enable_powersave(struct coh901318_chan *cohc) -{ - unsigned long flags; - struct powersave *pm = &cohc->base->pm; - - spin_lock_irqsave(&pm->lock, flags); - - pm->started_channels &= ~(1ULL << cohc->id); - - spin_unlock_irqrestore(&pm->lock, flags); -} -static void disable_powersave(struct coh901318_chan *cohc) -{ - unsigned long flags; - struct powersave *pm = &cohc->base->pm; - - spin_lock_irqsave(&pm->lock, flags); - - pm->started_channels |= (1ULL << cohc->id); - - spin_unlock_irqrestore(&pm->lock, flags); -} - -static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control) -{ - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - writel(control, - virtbase + COH901318_CX_CTRL + - COH901318_CX_CTRL_SPACING * channel); - return 0; -} - -static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf) -{ - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - writel(conf, - virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING*channel); - return 0; -} - - -static int coh901318_start(struct coh901318_chan *cohc) -{ - u32 val; - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - disable_powersave(cohc); - - val = readl(virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - - /* Enable channel */ - val |= COH901318_CX_CFG_CH_ENABLE; - writel(val, virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - - return 0; -} - -static int coh901318_prep_linked_list(struct coh901318_chan *cohc, - struct coh901318_lli *lli) -{ - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - BUG_ON(readl(virtbase + COH901318_CX_STAT + - COH901318_CX_STAT_SPACING*channel) & - COH901318_CX_STAT_ACTIVE); - - writel(lli->src_addr, - virtbase + COH901318_CX_SRC_ADDR + - COH901318_CX_SRC_ADDR_SPACING * channel); - - writel(lli->dst_addr, virtbase + - COH901318_CX_DST_ADDR + - COH901318_CX_DST_ADDR_SPACING * channel); - - writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR + - COH901318_CX_LNK_ADDR_SPACING * channel); - - writel(lli->control, virtbase + COH901318_CX_CTRL + - COH901318_CX_CTRL_SPACING * channel); - - return 0; -} - -static struct coh901318_desc * -coh901318_desc_get(struct coh901318_chan 
*cohc) -{ - struct coh901318_desc *desc; - - if (list_empty(&cohc->free)) { - /* alloc new desc because we're out of used ones - * TODO: alloc a pile of descs instead of just one, - * avoid many small allocations. - */ - desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); - if (desc == NULL) - goto out; - INIT_LIST_HEAD(&desc->node); - dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); - } else { - /* Reuse an old desc. */ - desc = list_first_entry(&cohc->free, - struct coh901318_desc, - node); - list_del(&desc->node); - /* Initialize it a bit so it's not insane */ - desc->sg = NULL; - desc->sg_len = 0; - desc->desc.callback = NULL; - desc->desc.callback_param = NULL; - } - - out: - return desc; -} - -static void -coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd) -{ - list_add_tail(&cohd->node, &cohc->free); -} - -/* call with irq lock held */ -static void -coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) -{ - list_add_tail(&desc->node, &cohc->active); -} - -static struct coh901318_desc * -coh901318_first_active_get(struct coh901318_chan *cohc) -{ - return list_first_entry_or_null(&cohc->active, struct coh901318_desc, - node); -} - -static void -coh901318_desc_remove(struct coh901318_desc *cohd) -{ - list_del(&cohd->node); -} - -static void -coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc) -{ - list_add_tail(&desc->node, &cohc->queue); -} - -static struct coh901318_desc * -coh901318_first_queued(struct coh901318_chan *cohc) -{ - return list_first_entry_or_null(&cohc->queue, struct coh901318_desc, - node); -} - -static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) -{ - struct coh901318_lli *lli = in_lli; - u32 bytes = 0; - - while (lli) { - bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK; - lli = lli->virt_link_addr; - } - return bytes; -} - -/* - * Get the number of bytes left to transfer on this channel, - * it is unwise to call this before stopping the channel for - * absolute measures, but for a rough guess you can still call - * it. - */ -static u32 coh901318_get_bytes_left(struct dma_chan *chan) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - struct coh901318_desc *cohd; - struct list_head *pos; - unsigned long flags; - u32 left = 0; - int i = 0; - - spin_lock_irqsave(&cohc->lock, flags); - - /* - * If there are many queued jobs, we iterate and add the - * size of them all. We take a special look on the first - * job though, since it is probably active. - */ - list_for_each(pos, &cohc->active) { - /* - * The first job in the list will be working on the - * hardware. The job can be stopped but still active, - * so that the transfer counter is somewhere inside - * the buffer. - */ - cohd = list_entry(pos, struct coh901318_desc, node); - - if (i == 0) { - struct coh901318_lli *lli; - dma_addr_t ladd; - - /* Read current transfer count value */ - left = readl(cohc->base->virtbase + - COH901318_CX_CTRL + - COH901318_CX_CTRL_SPACING * cohc->id) & - COH901318_CX_CTRL_TC_VALUE_MASK; - - /* See if the transfer is linked... 
*/ - ladd = readl(cohc->base->virtbase + - COH901318_CX_LNK_ADDR + - COH901318_CX_LNK_ADDR_SPACING * - cohc->id) & - ~COH901318_CX_LNK_LINK_IMMEDIATE; - /* Single transaction */ - if (!ladd) - continue; - - /* - * Linked transaction, follow the lli, find the - * currently processing lli, and proceed to the next - */ - lli = cohd->lli; - while (lli && lli->link_addr != ladd) - lli = lli->virt_link_addr; - - if (lli) - lli = lli->virt_link_addr; - - /* - * Follow remaining lli links around to count the total - * number of bytes left - */ - left += coh901318_get_bytes_in_lli(lli); - } else { - left += coh901318_get_bytes_in_lli(cohd->lli); - } - i++; - } - - /* Also count bytes in the queued jobs */ - list_for_each(pos, &cohc->queue) { - cohd = list_entry(pos, struct coh901318_desc, node); - left += coh901318_get_bytes_in_lli(cohd->lli); - } - - spin_unlock_irqrestore(&cohc->lock, flags); - - return left; -} - -/* - * Pauses a transfer without losing data. Enables power save. - * Use this function in conjunction with coh901318_resume. - */ -static int coh901318_pause(struct dma_chan *chan) -{ - u32 val; - unsigned long flags; - struct coh901318_chan *cohc = to_coh901318_chan(chan); - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - spin_lock_irqsave(&cohc->lock, flags); - - /* Disable channel in HW */ - val = readl(virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - - /* Stopping infinite transfer */ - if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && - (val & COH901318_CX_CFG_CH_ENABLE)) - cohc->stopped = 1; - - - val &= ~COH901318_CX_CFG_CH_ENABLE; - /* Enable twice, HW bug work around */ - writel(val, virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - writel(val, virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - - /* Spin-wait for it to actually go inactive */ - while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING * - channel) & COH901318_CX_STAT_ACTIVE) - cpu_relax(); - - /* Check if we stopped an active job */ - if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * - channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0) - cohc->stopped = 1; - - enable_powersave(cohc); - - spin_unlock_irqrestore(&cohc->lock, flags); - return 0; -} - -/* Resumes a transfer that has been stopped via 300_dma_stop(..). - Power save is handled. 
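The pause path above disables the channel in hardware (writing the config register twice to work around a hardware bug), spin-waits for the ACTIVE status bit to drop, and re-enables power save; resume below reverses it when the stopped flag is set. Through the generic dmaengine API this surfaces as device_pause/device_resume. A minimal consumer-side sketch, assuming the channel and cookie come from an earlier prep/submit (both hypothetical):

```c
/*
 * Consumer-side sketch of the pause/resume pair above, reached through
 * the generic dmaengine wrappers. The channel and cookie are assumed
 * to come from an earlier prep/submit; note the residue read-back is
 * only exact while the channel is stopped.
 */
#include <linux/dmaengine.h>
#include <linux/printk.h>

static void demo_pause_window(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_pause(chan);		/* halts without losing data */

	dmaengine_tx_status(chan, cookie, &state);
	pr_info("paused with %u bytes left\n", state.residue);

	dmaengine_resume(chan);		/* continues where it stopped */
}
```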
-*/ -static int coh901318_resume(struct dma_chan *chan) -{ - u32 val; - unsigned long flags; - struct coh901318_chan *cohc = to_coh901318_chan(chan); - int channel = cohc->id; - - spin_lock_irqsave(&cohc->lock, flags); - - disable_powersave(cohc); - - if (cohc->stopped) { - /* Enable channel in HW */ - val = readl(cohc->base->virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING * channel); - - val |= COH901318_CX_CFG_CH_ENABLE; - - writel(val, cohc->base->virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING*channel); - - cohc->stopped = 0; - } - - spin_unlock_irqrestore(&cohc->lock, flags); - return 0; -} - -bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) -{ - unsigned long ch_nr = (unsigned long) chan_id; - - if (ch_nr == to_coh901318_chan(chan)->id) - return true; - - return false; -} -EXPORT_SYMBOL(coh901318_filter_id); - -struct coh901318_filter_args { - struct coh901318_base *base; - unsigned int ch_nr; -}; - -static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data) -{ - struct coh901318_filter_args *args = data; - - if (&args->base->dma_slave == chan->device && - args->ch_nr == to_coh901318_chan(chan)->id) - return true; - - return false; -} - -static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct coh901318_filter_args args = { - .base = ofdma->of_dma_data, - .ch_nr = dma_spec->args[0], - }; - dma_cap_mask_t cap; - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - - return dma_request_channel(cap, coh901318_filter_base_and_id, &args); -} -/* - * DMA channel allocation - */ -static int coh901318_config(struct coh901318_chan *cohc, - struct coh901318_params *param) -{ - const struct coh901318_params *p; - int channel = cohc->id; - void __iomem *virtbase = cohc->base->virtbase; - - if (param) - p = param; - else - p = cohc_chan_param(cohc); - - /* Clear any pending BE or TC interrupt */ - if (channel < 32) { - writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1); - writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1); - } else { - writel(1 << (channel - 32), virtbase + - COH901318_BE_INT_CLEAR2); - writel(1 << (channel - 32), virtbase + - COH901318_TC_INT_CLEAR2); - } - - coh901318_set_conf(cohc, p->config); - coh901318_set_ctrl(cohc, p->ctrl_lli_last); - - return 0; -} - -/* must lock when calling this function - * start queued jobs, if any - * TODO: start all queued jobs in one go - * - * Returns descriptor if queued job is started otherwise NULL. - * If the queue is empty NULL is returned. - */ -static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) -{ - struct coh901318_desc *cohd; - - /* - * start queued jobs, if any - * TODO: transmit all queued jobs in one go - */ - cohd = coh901318_first_queued(cohc); - - if (cohd != NULL) { - /* Remove from queue */ - coh901318_desc_remove(cohd); - /* initiate DMA job */ - cohc->busy = 1; - - coh901318_desc_submit(cohc, cohd); - - /* Program the transaction head */ - coh901318_set_conf(cohc, cohd->head_config); - coh901318_set_ctrl(cohc, cohd->head_ctrl); - coh901318_prep_linked_list(cohc, cohd->lli); - - /* start dma job on this channel */ - coh901318_start(cohc); - - } - - return cohd; -} - -/* - * This tasklet is called from the interrupt handler to - * handle each descriptor (DMA job) that is sent to a channel. 
- */ -static void dma_tasklet(struct tasklet_struct *t) -{ - struct coh901318_chan *cohc = from_tasklet(cohc, t, tasklet); - struct coh901318_desc *cohd_fin; - unsigned long flags; - struct dmaengine_desc_callback cb; - - dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" - " nbr_active_done %ld\n", __func__, - cohc->id, cohc->nbr_active_done); - - spin_lock_irqsave(&cohc->lock, flags); - - /* get first active descriptor entry from list */ - cohd_fin = coh901318_first_active_get(cohc); - - if (cohd_fin == NULL) - goto err; - - /* locate callback to client */ - dmaengine_desc_get_callback(&cohd_fin->desc, &cb); - - /* sign this job as completed on the channel */ - dma_cookie_complete(&cohd_fin->desc); - - /* release the lli allocation and remove the descriptor */ - coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); - - /* return desc to free-list */ - coh901318_desc_remove(cohd_fin); - coh901318_desc_free(cohc, cohd_fin); - - spin_unlock_irqrestore(&cohc->lock, flags); - - /* Call the callback when we're done */ - dmaengine_desc_callback_invoke(&cb, NULL); - - spin_lock_irqsave(&cohc->lock, flags); - - /* - * If another interrupt fired while the tasklet was scheduling, - * we don't get called twice, so we have this number of active - * counter that keep track of the number of IRQs expected to - * be handled for this channel. If there happen to be more than - * one IRQ to be ack:ed, we simply schedule this tasklet again. - */ - cohc->nbr_active_done--; - if (cohc->nbr_active_done) { - dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " - "came in while we were scheduling this tasklet\n"); - if (cohc_chan_conf(cohc)->priority_high) - tasklet_hi_schedule(&cohc->tasklet); - else - tasklet_schedule(&cohc->tasklet); - } - - spin_unlock_irqrestore(&cohc->lock, flags); - - return; - - err: - spin_unlock_irqrestore(&cohc->lock, flags); - dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__); -} - - -/* called from interrupt context */ -static void dma_tc_handle(struct coh901318_chan *cohc) -{ - /* - * If the channel is not allocated, then we shouldn't have - * any TC interrupts on it. - */ - if (!cohc->allocated) { - dev_err(COHC_2_DEV(cohc), "spurious interrupt from " - "unallocated channel\n"); - return; - } - - /* - * When we reach this point, at least one queue item - * should have been moved over from cohc->queue to - * cohc->active and run to completion, that is why we're - * getting a terminal count interrupt is it not? - * If you get this BUG() the most probable cause is that - * the individual nodes in the lli chain have IRQ enabled, - * so check your platform config for lli chain ctrl. - */ - BUG_ON(list_empty(&cohc->active)); - - cohc->nbr_active_done++; - - /* - * This attempt to take a job from cohc->queue, put it - * into cohc->active and start it. - */ - if (coh901318_queue_start(cohc) == NULL) - cohc->busy = 0; - - /* - * This tasklet will remove items from cohc->active - * and thus terminates them. 
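dma_tc_handle() here counts terminal-count IRQs in nbr_active_done, and the tasklet above retires exactly one descriptor per run, rescheduling itself while the count stays nonzero so no completion is lost when IRQs arrive back-to-back. A stripped-down sketch of that counting pattern, with all names illustrative:

```c
/*
 * Stripped-down version of the IRQ-counting pattern above: the hard
 * IRQ bumps a per-channel counter, the tasklet retires one event per
 * run and reschedules itself while events remain. Names are
 * illustrative; init via tasklet_setup(&c->tasklet, demo_tasklet).
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_chan {
	spinlock_t lock;
	unsigned long nbr_done;		/* completions still owed */
	struct tasklet_struct tasklet;
};

static void demo_tc_irq(struct demo_chan *c)	/* hard-IRQ context */
{
	spin_lock(&c->lock);
	c->nbr_done++;
	spin_unlock(&c->lock);
	tasklet_schedule(&c->tasklet);
}

static void demo_tasklet(struct tasklet_struct *t)
{
	struct demo_chan *c = from_tasklet(c, t, tasklet);
	unsigned long flags;

	/* ... retire exactly one finished descriptor here ... */

	spin_lock_irqsave(&c->lock, flags);
	if (--c->nbr_done)
		tasklet_schedule(&c->tasklet);	/* IRQs landed while we ran */
	spin_unlock_irqrestore(&c->lock, flags);
}
```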
- */ - if (cohc_chan_conf(cohc)->priority_high) - tasklet_hi_schedule(&cohc->tasklet); - else - tasklet_schedule(&cohc->tasklet); -} - - -static irqreturn_t dma_irq_handler(int irq, void *dev_id) -{ - u32 status1; - u32 status2; - int i; - int ch; - struct coh901318_base *base = dev_id; - struct coh901318_chan *cohc; - void __iomem *virtbase = base->virtbase; - - status1 = readl(virtbase + COH901318_INT_STATUS1); - status2 = readl(virtbase + COH901318_INT_STATUS2); - - if (unlikely(status1 == 0 && status2 == 0)) { - dev_warn(base->dev, "spurious DMA IRQ from no channel!\n"); - return IRQ_HANDLED; - } - - /* TODO: consider handle IRQ in tasklet here to - * minimize interrupt latency */ - - /* Check the first 32 DMA channels for IRQ */ - while (status1) { - /* Find first bit set, return as a number. */ - i = ffs(status1) - 1; - ch = i; - - cohc = &base->chans[ch]; - spin_lock(&cohc->lock); - - /* Mask off this bit */ - status1 &= ~(1 << i); - /* Check the individual channel bits */ - if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) { - dev_crit(COHC_2_DEV(cohc), - "DMA bus error on channel %d!\n", ch); - BUG_ON(1); - /* Clear BE interrupt */ - __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1); - } else { - /* Caused by TC, really? */ - if (unlikely(!test_bit(i, virtbase + - COH901318_TC_INT_STATUS1))) { - dev_warn(COHC_2_DEV(cohc), - "ignoring interrupt not caused by terminal count on channel %d\n", ch); - /* Clear TC interrupt */ - BUG_ON(1); - __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); - } else { - /* Enable powersave if transfer has finished */ - if (!(readl(virtbase + COH901318_CX_STAT + - COH901318_CX_STAT_SPACING*ch) & - COH901318_CX_STAT_ENABLED)) { - enable_powersave(cohc); - } - - /* Must clear TC interrupt before calling - * dma_tc_handle - * in case tc_handle initiate a new dma job - */ - __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); - - dma_tc_handle(cohc); - } - } - spin_unlock(&cohc->lock); - } - - /* Check the remaining 32 DMA channels for IRQ */ - while (status2) { - /* Find first bit set, return as a number. */ - i = ffs(status2) - 1; - ch = i + 32; - cohc = &base->chans[ch]; - spin_lock(&cohc->lock); - - /* Mask off this bit */ - status2 &= ~(1 << i); - /* Check the individual channel bits */ - if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) { - dev_crit(COHC_2_DEV(cohc), - "DMA bus error on channel %d!\n", ch); - /* Clear BE interrupt */ - BUG_ON(1); - __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2); - } else { - /* Caused by TC, really? 
*/ - if (unlikely(!test_bit(i, virtbase + - COH901318_TC_INT_STATUS2))) { - dev_warn(COHC_2_DEV(cohc), - "ignoring interrupt not caused by terminal count on channel %d\n", ch); - /* Clear TC interrupt */ - __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); - BUG_ON(1); - } else { - /* Enable powersave if transfer has finished */ - if (!(readl(virtbase + COH901318_CX_STAT + - COH901318_CX_STAT_SPACING*ch) & - COH901318_CX_STAT_ENABLED)) { - enable_powersave(cohc); - } - /* Must clear TC interrupt before calling - * dma_tc_handle - * in case tc_handle initiate a new dma job - */ - __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); - - dma_tc_handle(cohc); - } - } - spin_unlock(&cohc->lock); - } - - return IRQ_HANDLED; -} - -static int coh901318_terminate_all(struct dma_chan *chan) -{ - unsigned long flags; - struct coh901318_chan *cohc = to_coh901318_chan(chan); - struct coh901318_desc *cohd; - void __iomem *virtbase = cohc->base->virtbase; - - /* The remainder of this function terminates the transfer */ - coh901318_pause(chan); - spin_lock_irqsave(&cohc->lock, flags); - - /* Clear any pending BE or TC interrupt */ - if (cohc->id < 32) { - writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); - writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); - } else { - writel(1 << (cohc->id - 32), virtbase + - COH901318_BE_INT_CLEAR2); - writel(1 << (cohc->id - 32), virtbase + - COH901318_TC_INT_CLEAR2); - } - - enable_powersave(cohc); - - while ((cohd = coh901318_first_active_get(cohc))) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd->lli); - - /* return desc to free-list */ - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); - } - - while ((cohd = coh901318_first_queued(cohc))) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd->lli); - - /* return desc to free-list */ - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); - } - - - cohc->nbr_active_done = 0; - cohc->busy = 0; - - spin_unlock_irqrestore(&cohc->lock, flags); - - return 0; -} - -static int coh901318_alloc_chan_resources(struct dma_chan *chan) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - unsigned long flags; - - dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", - __func__, cohc->id); - - if (chan->client_count > 1) - return -EBUSY; - - spin_lock_irqsave(&cohc->lock, flags); - - coh901318_config(cohc, NULL); - - cohc->allocated = 1; - dma_cookie_init(chan); - - spin_unlock_irqrestore(&cohc->lock, flags); - - return 1; -} - -static void -coh901318_free_chan_resources(struct dma_chan *chan) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - int channel = cohc->id; - unsigned long flags; - - spin_lock_irqsave(&cohc->lock, flags); - - /* Disable HW */ - writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG + - COH901318_CX_CFG_SPACING*channel); - writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL + - COH901318_CX_CTRL_SPACING*channel); - - cohc->allocated = 0; - - spin_unlock_irqrestore(&cohc->lock, flags); - - coh901318_terminate_all(chan); -} - - -static dma_cookie_t -coh901318_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc, - desc); - struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&cohc->lock, flags); - cookie = dma_cookie_assign(tx); - - coh901318_desc_queue(cohc, cohd); - - spin_unlock_irqrestore(&cohc->lock, flags); - - return cookie; -} - 
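coh901318_tx_submit() above only assigns a cookie and moves the descriptor onto the channel queue; the hardware is untouched until issue_pending runs the queue. The canonical client-side sequence against such a memcpy channel looks like this (a sketch; the channel and the DMA-mapped src/dst are hypothetical):

```c
/*
 * Sketch: the canonical client sequence against a memcpy channel.
 * prep builds the descriptor, submit (coh901318_tx_submit() above)
 * only queues it, and issue_pending starts the hardware. The channel
 * and the DMA-mapped src/dst addresses are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int demo_memcpy(struct dma_chan *chan, dma_addr_t dst,
		       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);		/* queue only, nothing runs */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* actually start the queue */
	return 0;
}
```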
-static struct dma_async_tx_descriptor * -coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, - size_t size, unsigned long flags) -{ - struct coh901318_lli *lli; - struct coh901318_desc *cohd; - unsigned long flg; - struct coh901318_chan *cohc = to_coh901318_chan(chan); - int lli_len; - u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; - int ret; - - spin_lock_irqsave(&cohc->lock, flg); - - dev_vdbg(COHC_2_DEV(cohc), - "[%s] channel %d src %pad dest %pad size %zu\n", - __func__, cohc->id, &src, &dest, size); - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last lli */ - ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; - - lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT; - if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) - lli_len++; - - lli = coh901318_lli_alloc(&cohc->base->pool, lli_len); - - if (lli == NULL) - goto err; - - ret = coh901318_lli_fill_memcpy( - &cohc->base->pool, lli, src, size, dest, - cohc_chan_param(cohc)->ctrl_lli_chained, - ctrl_last); - if (ret) - goto err; - - COH_DBG(coh901318_list_print(cohc, lli)); - - /* Pick a descriptor to handle this transfer */ - cohd = coh901318_desc_get(cohc); - cohd->lli = lli; - cohd->flags = flags; - cohd->desc.tx_submit = coh901318_tx_submit; - - spin_unlock_irqrestore(&cohc->lock, flg); - - return &cohd->desc; - err: - spin_unlock_irqrestore(&cohc->lock, flg); - return NULL; -} - -static struct dma_async_tx_descriptor * -coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - struct coh901318_lli *lli; - struct coh901318_desc *cohd; - const struct coh901318_params *params; - struct scatterlist *sg; - int len = 0; - int size; - int i; - u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; - u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; - u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; - u32 config; - unsigned long flg; - int ret; - - if (!sgl) - goto out; - if (sg_dma_len(sgl) == 0) - goto out; - - spin_lock_irqsave(&cohc->lock, flg); - - dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n", - __func__, sg_len, direction); - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last lli */ - ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; - - params = cohc_chan_param(cohc); - config = params->config; - /* - * Add runtime-specific control on top, make - * sure the bits you set per peripheral channel are - * cleared in the default config from the platform. - */ - ctrl_chained |= cohc->ctrl; - ctrl_last |= cohc->ctrl; - ctrl |= cohc->ctrl; - - if (direction == DMA_MEM_TO_DEV) { - u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | - COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; - - config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; - ctrl_chained |= tx_flags; - ctrl_last |= tx_flags; - ctrl |= tx_flags; - } else if (direction == DMA_DEV_TO_MEM) { - u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | - COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; - - config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; - ctrl_chained |= rx_flags; - ctrl_last |= rx_flags; - ctrl |= rx_flags; - } else - goto err_direction; - - /* The dma only supports transmitting packages up to - * MAX_DMA_PACKET_SIZE. 
Calculate to total number of - * dma elemts required to send the entire sg list - */ - for_each_sg(sgl, sg, sg_len, i) { - unsigned int factor; - size = sg_dma_len(sg); - - if (size <= MAX_DMA_PACKET_SIZE) { - len++; - continue; - } - - factor = size >> MAX_DMA_PACKET_SIZE_SHIFT; - if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size) - factor++; - - len += factor; - } - - pr_debug("Allocate %d lli:s for this transfer\n", len); - lli = coh901318_lli_alloc(&cohc->base->pool, len); - - if (lli == NULL) - goto err_dma_alloc; - - coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction); - - /* initiate allocated lli list */ - ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, - cohc->addr, - ctrl_chained, - ctrl, - ctrl_last, - direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); - if (ret) - goto err_lli_fill; - - - COH_DBG(coh901318_list_print(cohc, lli)); - - /* Pick a descriptor to handle this transfer */ - cohd = coh901318_desc_get(cohc); - cohd->head_config = config; - /* - * Set the default head ctrl for the channel to the one from the - * lli, things may have changed due to odd buffer alignment - * etc. - */ - cohd->head_ctrl = lli->control; - cohd->dir = direction; - cohd->flags = flags; - cohd->desc.tx_submit = coh901318_tx_submit; - cohd->lli = lli; - - spin_unlock_irqrestore(&cohc->lock, flg); - - return &cohd->desc; - err_lli_fill: - err_dma_alloc: - err_direction: - spin_unlock_irqrestore(&cohc->lock, flg); - out: - return NULL; -} - -static enum dma_status -coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - enum dma_status ret; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE || !txstate) - return ret; - - dma_set_residue(txstate, coh901318_get_bytes_left(chan)); - - if (ret == DMA_IN_PROGRESS && cohc->stopped) - ret = DMA_PAUSED; - - return ret; -} - -static void -coh901318_issue_pending(struct dma_chan *chan) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - unsigned long flags; - - spin_lock_irqsave(&cohc->lock, flags); - - /* - * Busy means that pending jobs are already being processed, - * and then there is no point in starting the queue: the - * terminal count interrupt on the channel will take the next - * job on the queue and execute it anyway. 
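Both prep paths above split a request into linked-list items of at most MAX_DMA_PACKET_SIZE bytes (2^11, per the header further below) using a shift-then-correct count, which is simply a round-up division. Restated as a sketch:

```c
/*
 * The lli count used by prep_memcpy()/prep_slave_sg() above: shift
 * down by the packet-size order, then add one if a partial trailing
 * packet remains. Equivalent to DIV_ROUND_UP(size, packet size).
 */
#include <linux/kernel.h>

#define DEMO_PACKET_SHIFT	11		/* MAX_DMA_PACKET_SIZE_SHIFT */
#define DEMO_PACKET_SIZE	(1 << DEMO_PACKET_SHIFT)	/* 2048 bytes */

static int demo_lli_len(size_t size)
{
	int lli_len = size >> DEMO_PACKET_SHIFT;

	if (((size_t)lli_len << DEMO_PACKET_SHIFT) < size)
		lli_len++;	/* trailing partial packet needs its own lli */

	return lli_len;		/* == DIV_ROUND_UP(size, DEMO_PACKET_SIZE) */
}
```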
- */ - if (!cohc->busy) - coh901318_queue_start(cohc); - - spin_unlock_irqrestore(&cohc->lock, flags); -} - -/* - * Here we wrap in the runtime dma control interface - */ -struct burst_table { - int burst_8bit; - int burst_16bit; - int burst_32bit; - u32 reg; -}; - -static const struct burst_table burst_sizes[] = { - { - .burst_8bit = 64, - .burst_16bit = 32, - .burst_32bit = 16, - .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES, - }, - { - .burst_8bit = 48, - .burst_16bit = 24, - .burst_32bit = 12, - .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES, - }, - { - .burst_8bit = 32, - .burst_16bit = 16, - .burst_32bit = 8, - .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES, - }, - { - .burst_8bit = 16, - .burst_16bit = 8, - .burst_32bit = 4, - .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES, - }, - { - .burst_8bit = 8, - .burst_16bit = 4, - .burst_32bit = 2, - .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES, - }, - { - .burst_8bit = 4, - .burst_16bit = 2, - .burst_32bit = 1, - .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES, - }, - { - .burst_8bit = 2, - .burst_16bit = 1, - .burst_32bit = 0, - .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES, - }, - { - .burst_8bit = 1, - .burst_16bit = 0, - .burst_32bit = 0, - .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE, - }, -}; - -static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, - struct dma_slave_config *config, - enum dma_transfer_direction direction) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - dma_addr_t addr; - enum dma_slave_buswidth addr_width; - u32 maxburst; - u32 ctrl = 0; - int i = 0; - - /* We only support mem to per or per to mem transfers */ - if (direction == DMA_DEV_TO_MEM) { - addr = config->src_addr; - addr_width = config->src_addr_width; - maxburst = config->src_maxburst; - } else if (direction == DMA_MEM_TO_DEV) { - addr = config->dst_addr; - addr_width = config->dst_addr_width; - maxburst = config->dst_maxburst; - } else { - dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); - return -EINVAL; - } - - dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", - addr_width); - switch (addr_width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - ctrl |= - COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS | - COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS; - - while (i < ARRAY_SIZE(burst_sizes)) { - if (burst_sizes[i].burst_8bit <= maxburst) - break; - i++; - } - - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - ctrl |= - COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS | - COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS; - - while (i < ARRAY_SIZE(burst_sizes)) { - if (burst_sizes[i].burst_16bit <= maxburst) - break; - i++; - } - - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - /* Direction doesn't matter here, it's 32/32 bits */ - ctrl |= - COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | - COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS; - - while (i < ARRAY_SIZE(burst_sizes)) { - if (burst_sizes[i].burst_32bit <= maxburst) - break; - i++; - } - - break; - default: - dev_err(COHC_2_DEV(cohc), - "bad runtimeconfig: alien address width\n"); - return -EINVAL; - } - - ctrl |= burst_sizes[i].reg; - dev_dbg(COHC_2_DEV(cohc), - "selected burst size %d bytes for address width %d bytes, maxburst %d\n", - burst_sizes[i].burst_8bit, addr_width, maxburst); - - cohc->addr = addr; - cohc->ctrl = ctrl; - - return 0; -} - -static int coh901318_dma_slave_config(struct dma_chan *chan, - struct dma_slave_config *config) -{ - struct coh901318_chan *cohc = to_coh901318_chan(chan); - - memcpy(&cohc->config, config, sizeof(*config)); - - return 0; -} - -static void coh901318_base_init(struct dma_device 
*dma, const int *pick_chans, - struct coh901318_base *base) -{ - int chans_i; - int i = 0; - struct coh901318_chan *cohc; - - INIT_LIST_HEAD(&dma->channels); - - for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { - for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { - cohc = &base->chans[i]; - - cohc->base = base; - cohc->chan.device = dma; - cohc->id = i; - - /* TODO: do we really need this lock if only one - * client is connected to each channel? - */ - - spin_lock_init(&cohc->lock); - - cohc->nbr_active_done = 0; - cohc->busy = 0; - INIT_LIST_HEAD(&cohc->free); - INIT_LIST_HEAD(&cohc->active); - INIT_LIST_HEAD(&cohc->queue); - - tasklet_setup(&cohc->tasklet, dma_tasklet); - - list_add_tail(&cohc->chan.device_node, - &dma->channels); - } - } -} - -static int __init coh901318_probe(struct platform_device *pdev) -{ - int err = 0; - struct coh901318_base *base; - int irq; - struct resource *io; - - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) - return -ENODEV; - - /* Map DMA controller registers to virtual memory */ - if (devm_request_mem_region(&pdev->dev, - io->start, - resource_size(io), - pdev->dev.driver->name) == NULL) - return -ENOMEM; - - base = devm_kzalloc(&pdev->dev, - ALIGN(sizeof(struct coh901318_base), 4) + - U300_DMA_CHANNELS * - sizeof(struct coh901318_chan), - GFP_KERNEL); - if (!base) - return -ENOMEM; - - base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); - - base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); - if (!base->virtbase) - return -ENOMEM; - - base->dev = &pdev->dev; - spin_lock_init(&base->pm.lock); - base->pm.started_channels = 0; - - COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, - "coh901318", base); - if (err) - return err; - - base->irq = irq; - - err = coh901318_pool_create(&base->pool, &pdev->dev, - sizeof(struct coh901318_lli), - 32); - if (err) - return err; - - /* init channels for device transfers */ - coh901318_base_init(&base->dma_slave, dma_slave_channels, - base); - - dma_cap_zero(base->dma_slave.cap_mask); - dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); - - base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; - base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; - base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; - base->dma_slave.device_tx_status = coh901318_tx_status; - base->dma_slave.device_issue_pending = coh901318_issue_pending; - base->dma_slave.device_config = coh901318_dma_slave_config; - base->dma_slave.device_pause = coh901318_pause; - base->dma_slave.device_resume = coh901318_resume; - base->dma_slave.device_terminate_all = coh901318_terminate_all; - base->dma_slave.dev = &pdev->dev; - - err = dma_async_device_register(&base->dma_slave); - - if (err) - goto err_register_slave; - - /* init channels for memcpy */ - coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels, - base); - - dma_cap_zero(base->dma_memcpy.cap_mask); - dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); - - base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; - base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; - base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; - base->dma_memcpy.device_tx_status = coh901318_tx_status; - base->dma_memcpy.device_issue_pending = coh901318_issue_pending; - 
base->dma_memcpy.device_config = coh901318_dma_slave_config; - base->dma_memcpy.device_pause = coh901318_pause; - base->dma_memcpy.device_resume = coh901318_resume; - base->dma_memcpy.device_terminate_all = coh901318_terminate_all; - base->dma_memcpy.dev = &pdev->dev; - /* - * This controller can only access address at even 32bit boundaries, - * i.e. 2^2 - */ - base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES; - err = dma_async_device_register(&base->dma_memcpy); - - if (err) - goto err_register_memcpy; - - err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate, - base); - if (err) - goto err_register_of_dma; - - platform_set_drvdata(pdev, base); - dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n", - base->virtbase); - - return err; - - err_register_of_dma: - dma_async_device_unregister(&base->dma_memcpy); - err_register_memcpy: - dma_async_device_unregister(&base->dma_slave); - err_register_slave: - coh901318_pool_destroy(&base->pool); - return err; -} -static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans) -{ - int chans_i; - int i = 0; - struct coh901318_chan *cohc; - - for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { - for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { - cohc = &base->chans[i]; - - tasklet_kill(&cohc->tasklet); - } - } - -} - -static int coh901318_remove(struct platform_device *pdev) -{ - struct coh901318_base *base = platform_get_drvdata(pdev); - - devm_free_irq(&pdev->dev, base->irq, base); - - coh901318_base_remove(base, dma_slave_channels); - coh901318_base_remove(base, dma_memcpy_channels); - - of_dma_controller_free(pdev->dev.of_node); - dma_async_device_unregister(&base->dma_memcpy); - dma_async_device_unregister(&base->dma_slave); - coh901318_pool_destroy(&base->pool); - return 0; -} - -static const struct of_device_id coh901318_dt_match[] = { - { .compatible = "stericsson,coh901318" }, - {}, -}; - -static struct platform_driver coh901318_driver = { - .remove = coh901318_remove, - .driver = { - .name = "coh901318", - .of_match_table = coh901318_dt_match, - }, -}; - -static int __init coh901318_init(void) -{ - return platform_driver_probe(&coh901318_driver, coh901318_probe); -} -subsys_initcall(coh901318_init); - -static void __exit coh901318_exit(void) -{ - platform_driver_unregister(&coh901318_driver); -} -module_exit(coh901318_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Per Friden"); diff --git a/drivers/dma/coh901318.h b/drivers/dma/coh901318.h deleted file mode 100644 index bbf533600558..000000000000 --- a/drivers/dma/coh901318.h +++ /dev/null @@ -1,141 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2007-2013 ST-Ericsson - * DMA driver for COH 901 318 - * Author: Per Friden <per.friden@stericsson.com> - */ - -#ifndef COH901318_H -#define COH901318_H - -#define MAX_DMA_PACKET_SIZE_SHIFT 11 -#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT) - -struct device; - -struct coh901318_pool { - spinlock_t lock; - struct dma_pool *dmapool; - struct device *dev; - -#ifdef CONFIG_DEBUG_FS - int debugfs_pool_counter; -#endif -}; - -/** - * struct coh901318_lli - linked list item for DMAC - * @control: control settings for DMAC - * @src_addr: transfer source address - * @dst_addr: transfer destination address - * @link_addr: physical address to next lli - * @virt_link_addr: virtual address of next lli (only used by pool_free) - * @phy_this: physical address of current lli (only used by pool_free) - */ -struct coh901318_lli { - u32 
control; - dma_addr_t src_addr; - dma_addr_t dst_addr; - dma_addr_t link_addr; - - void *virt_link_addr; - dma_addr_t phy_this; -}; - -/** - * coh901318_pool_create() - Creates an dma pool for lli:s - * @pool: pool handle - * @dev: dma device - * @lli_nbr: number of lli:s in the pool - * @algin: address alignemtn of lli:s - * returns 0 on success otherwise none zero - */ -int coh901318_pool_create(struct coh901318_pool *pool, - struct device *dev, - size_t lli_nbr, size_t align); - -/** - * coh901318_pool_destroy() - Destroys the dma pool - * @pool: pool handle - * returns 0 on success otherwise none zero - */ -int coh901318_pool_destroy(struct coh901318_pool *pool); - -/** - * coh901318_lli_alloc() - Allocates a linked list - * - * @pool: pool handle - * @len: length to list - * return: none NULL if success otherwise NULL - */ -struct coh901318_lli * -coh901318_lli_alloc(struct coh901318_pool *pool, - unsigned int len); - -/** - * coh901318_lli_free() - Returns the linked list items to the pool - * @pool: pool handle - * @lli: reference to lli pointer to be freed - */ -void coh901318_lli_free(struct coh901318_pool *pool, - struct coh901318_lli **lli); - -/** - * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy - * @pool: pool handle - * @lli: allocated lli - * @src: src address - * @size: transfer size - * @dst: destination address - * @ctrl_chained: ctrl for chained lli - * @ctrl_last: ctrl for the last lli - * returns number of CPU interrupts for the lli, negative on error. - */ -int -coh901318_lli_fill_memcpy(struct coh901318_pool *pool, - struct coh901318_lli *lli, - dma_addr_t src, unsigned int size, - dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last); - -/** - * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer - * @pool: pool handle - * @lli: allocated lli - * @buf: transfer buffer - * @size: transfer size - * @dev_addr: address of periphal - * @ctrl_chained: ctrl for chained lli - * @ctrl_last: ctrl for the last lli - * @dir: direction of transfer (to or from device) - * returns number of CPU interrupts for the lli, negative on error. - */ -int -coh901318_lli_fill_single(struct coh901318_pool *pool, - struct coh901318_lli *lli, - dma_addr_t buf, unsigned int size, - dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, - enum dma_transfer_direction dir); - -/** - * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer - * @pool: pool handle - * @lli: allocated lli - * @sg: scatter gather list - * @nents: number of entries in sg - * @dev_addr: address of periphal - * @ctrl_chained: ctrl for chained lli - * @ctrl: ctrl of middle lli - * @ctrl_last: ctrl for the last lli - * @dir: direction of transfer (to or from device) - * @ctrl_irq_mask: ctrl mask for CPU interrupt - * returns number of CPU interrupts for the lli, negative on error. 
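Taken together, the pool helpers documented in this header are used in a fixed pattern: size the chain, allocate it, fill it, and hand it back on error or completion. A minimal sketch against these (now removed) helpers; the pool is assumed to have been set up with coh901318_pool_create(), and the ctrl words, which normally come from platform data, are hypothetical:

```c
/*
 * Sketch: the fixed usage pattern of the (removed) lli pool API
 * documented above, building a memcpy chain. Pool setup is assumed
 * done elsewhere; ctrl_chained/ctrl_last are hypothetical control
 * words that would come from platform data.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include "coh901318.h"

static int demo_build_chain(struct coh901318_pool *pool,
			    dma_addr_t src, dma_addr_t dst, size_t size,
			    u32 ctrl_chained, u32 ctrl_last)
{
	unsigned int len = DIV_ROUND_UP(size, MAX_DMA_PACKET_SIZE);
	struct coh901318_lli *lli;
	int ret;

	lli = coh901318_lli_alloc(pool, len);	/* chain of len linked items */
	if (!lli)
		return -ENOMEM;

	ret = coh901318_lli_fill_memcpy(pool, lli, src, size, dst,
					ctrl_chained, ctrl_last);
	if (ret < 0)
		coh901318_lli_free(pool, &lli);	/* hand the chain back */

	return ret;	/* >= 0: number of CPU interrupts for the chain */
}
```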
- */ -int -coh901318_lli_fill_sg(struct coh901318_pool *pool, - struct coh901318_lli *lli, - struct scatterlist *sg, unsigned int nents, - dma_addr_t dev_addr, u32 ctrl_chained, - u32 ctrl, u32 ctrl_last, - enum dma_transfer_direction dir, u32 ctrl_irq_mask); - -#endif /* COH901318_H */ diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c deleted file mode 100644 index 6b6c2fd0865a..000000000000 --- a/drivers/dma/coh901318_lli.c +++ /dev/null @@ -1,313 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * driver/dma/coh901318_lli.c - * - * Copyright (C) 2007-2009 ST-Ericsson - * Support functions for handling lli for dma - * Author: Per Friden <per.friden@stericsson.com> - */ - -#include <linux/spinlock.h> -#include <linux/memory.h> -#include <linux/gfp.h> -#include <linux/dmapool.h> -#include <linux/dmaengine.h> - -#include "coh901318.h" - -#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG)) -#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0) -#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add) -#else -#define DEBUGFS_POOL_COUNTER_RESET(pool) -#define DEBUGFS_POOL_COUNTER_ADD(pool, add) -#endif - -static struct coh901318_lli * -coh901318_lli_next(struct coh901318_lli *data) -{ - if (data == NULL || data->link_addr == 0) - return NULL; - - return (struct coh901318_lli *) data->virt_link_addr; -} - -int coh901318_pool_create(struct coh901318_pool *pool, - struct device *dev, - size_t size, size_t align) -{ - spin_lock_init(&pool->lock); - pool->dev = dev; - pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0); - - DEBUGFS_POOL_COUNTER_RESET(pool); - return 0; -} - -int coh901318_pool_destroy(struct coh901318_pool *pool) -{ - - dma_pool_destroy(pool->dmapool); - return 0; -} - -struct coh901318_lli * -coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) -{ - int i; - struct coh901318_lli *head; - struct coh901318_lli *lli; - struct coh901318_lli *lli_prev; - dma_addr_t phy; - - if (len == 0) - return NULL; - - spin_lock(&pool->lock); - - head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy); - - if (head == NULL) - goto err; - - DEBUGFS_POOL_COUNTER_ADD(pool, 1); - - lli = head; - lli->phy_this = phy; - lli->link_addr = 0x00000000; - lli->virt_link_addr = NULL; - - for (i = 1; i < len; i++) { - lli_prev = lli; - - lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy); - - if (lli == NULL) - goto err_clean_up; - - DEBUGFS_POOL_COUNTER_ADD(pool, 1); - lli->phy_this = phy; - lli->link_addr = 0x00000000; - lli->virt_link_addr = NULL; - - lli_prev->link_addr = phy; - lli_prev->virt_link_addr = lli; - } - - spin_unlock(&pool->lock); - - return head; - - err: - spin_unlock(&pool->lock); - return NULL; - - err_clean_up: - lli_prev->link_addr = 0x00000000U; - spin_unlock(&pool->lock); - coh901318_lli_free(pool, &head); - return NULL; -} - -void coh901318_lli_free(struct coh901318_pool *pool, - struct coh901318_lli **lli) -{ - struct coh901318_lli *l; - struct coh901318_lli *next; - - if (lli == NULL) - return; - - l = *lli; - - if (l == NULL) - return; - - spin_lock(&pool->lock); - - while (l->link_addr) { - next = l->virt_link_addr; - dma_pool_free(pool->dmapool, l, l->phy_this); - DEBUGFS_POOL_COUNTER_ADD(pool, -1); - l = next; - } - dma_pool_free(pool->dmapool, l, l->phy_this); - DEBUGFS_POOL_COUNTER_ADD(pool, -1); - - spin_unlock(&pool->lock); - *lli = NULL; -} - -int -coh901318_lli_fill_memcpy(struct coh901318_pool *pool, - struct coh901318_lli *lli, - dma_addr_t source, unsigned int 
size, - dma_addr_t destination, u32 ctrl_chained, - u32 ctrl_eom) -{ - int s = size; - dma_addr_t src = source; - dma_addr_t dst = destination; - - lli->src_addr = src; - lli->dst_addr = dst; - - while (lli->link_addr) { - lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE; - lli->src_addr = src; - lli->dst_addr = dst; - - s -= MAX_DMA_PACKET_SIZE; - lli = coh901318_lli_next(lli); - - src += MAX_DMA_PACKET_SIZE; - dst += MAX_DMA_PACKET_SIZE; - } - - lli->control = ctrl_eom | s; - lli->src_addr = src; - lli->dst_addr = dst; - - return 0; -} - -int -coh901318_lli_fill_single(struct coh901318_pool *pool, - struct coh901318_lli *lli, - dma_addr_t buf, unsigned int size, - dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, - enum dma_transfer_direction dir) -{ - int s = size; - dma_addr_t src; - dma_addr_t dst; - - - if (dir == DMA_MEM_TO_DEV) { - src = buf; - dst = dev_addr; - - } else if (dir == DMA_DEV_TO_MEM) { - - src = dev_addr; - dst = buf; - } else { - return -EINVAL; - } - - while (lli->link_addr) { - size_t block_size = MAX_DMA_PACKET_SIZE; - lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE; - - /* If we are on the next-to-final block and there will - * be less than half a DMA packet left for the last - * block, then we want to make this block a little - * smaller to balance the sizes. This is meant to - * avoid too small transfers if the buffer size is - * (MAX_DMA_PACKET_SIZE*N + 1) */ - if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2)) - block_size = MAX_DMA_PACKET_SIZE/2; - - s -= block_size; - lli->src_addr = src; - lli->dst_addr = dst; - - lli = coh901318_lli_next(lli); - - if (dir == DMA_MEM_TO_DEV) - src += block_size; - else if (dir == DMA_DEV_TO_MEM) - dst += block_size; - } - - lli->control = ctrl_eom | s; - lli->src_addr = src; - lli->dst_addr = dst; - - return 0; -} - -int -coh901318_lli_fill_sg(struct coh901318_pool *pool, - struct coh901318_lli *lli, - struct scatterlist *sgl, unsigned int nents, - dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, - u32 ctrl_last, - enum dma_transfer_direction dir, u32 ctrl_irq_mask) -{ - int i; - struct scatterlist *sg; - u32 ctrl_sg; - dma_addr_t src = 0; - dma_addr_t dst = 0; - u32 bytes_to_transfer; - u32 elem_size; - - if (lli == NULL) - goto err; - - spin_lock(&pool->lock); - - if (dir == DMA_MEM_TO_DEV) - dst = dev_addr; - else if (dir == DMA_DEV_TO_MEM) - src = dev_addr; - else - goto err; - - for_each_sg(sgl, sg, nents, i) { - if (sg_is_chain(sg)) { - /* sg continues to the next sg-element don't - * send ctrl_finish until the last - * sg-element in the chain - */ - ctrl_sg = ctrl_chained; - } else if (i == nents - 1) - ctrl_sg = ctrl_last; - else - ctrl_sg = ctrl ? 
ctrl : ctrl_last; - - - if (dir == DMA_MEM_TO_DEV) - /* increment source address */ - src = sg_dma_address(sg); - else - /* increment destination address */ - dst = sg_dma_address(sg); - - bytes_to_transfer = sg_dma_len(sg); - - while (bytes_to_transfer) { - u32 val; - - if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) { - elem_size = MAX_DMA_PACKET_SIZE; - val = ctrl_chained; - } else { - elem_size = bytes_to_transfer; - val = ctrl_sg; - } - - lli->control = val | elem_size; - lli->src_addr = src; - lli->dst_addr = dst; - - if (dir == DMA_DEV_TO_MEM) - dst += elem_size; - else - src += elem_size; - - BUG_ON(lli->link_addr & 3); - - bytes_to_transfer -= elem_size; - lli = coh901318_lli_next(lli); - } - - } - spin_unlock(&pool->lock); - - return 0; - err: - spin_unlock(&pool->lock); - return -EINVAL; -} diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 612d353648cf..ebee94dbd630 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1004,6 +1004,18 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { JZ_SOC_DATA_BREAK_LINKS, }; +static const struct jz4780_dma_soc_data jz4760_dma_soc_data = { + .nb_channels = 5, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, +}; + +static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = { + .nb_channels = 5, + .transfer_ord_max = 6, + .flags = JZ_SOC_DATA_PER_CHAN_PM, +}; + static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 6, @@ -1031,6 +1043,8 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = { static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, + { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data }, + { .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index e164f3295f5d..d9e4ac3edb4e 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -12,15 +12,20 @@ #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/property.h> +#include <linux/slab.h> #include <linux/types.h> #include "dw-axi-dmac.h" @@ -195,43 +200,56 @@ static inline const char *axi_chan_name(struct axi_dma_chan *chan) return dma_chan_name(&chan->vc.chan); } -static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan) +static struct axi_dma_desc *axi_desc_alloc(u32 num) { - struct dw_axi_dma *dw = chan->chip->dw; struct axi_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT); + if (!desc->hw_desc) { + kfree(desc); + return NULL; + } + + return desc; +} + +static struct axi_dma_lli 
*axi_desc_get(struct axi_dma_chan *chan, + dma_addr_t *addr) +{ + struct axi_dma_lli *lli; dma_addr_t phys; - desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys); - if (unlikely(!desc)) { + lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys); + if (unlikely(!lli)) { dev_err(chan2dev(chan), "%s: not enough descriptors available\n", axi_chan_name(chan)); return NULL; } atomic_inc(&chan->descs_allocated); - INIT_LIST_HEAD(&desc->xfer_list); - desc->vd.tx.phys = phys; - desc->chan = chan; + *addr = phys; - return desc; + return lli; } static void axi_desc_put(struct axi_dma_desc *desc) { struct axi_dma_chan *chan = desc->chan; - struct dw_axi_dma *dw = chan->chip->dw; - struct axi_dma_desc *child, *_next; - unsigned int descs_put = 0; + int count = atomic_read(&chan->descs_allocated); + struct axi_dma_hw_desc *hw_desc; + int descs_put; - list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) { - list_del(&child->xfer_list); - dma_pool_free(dw->desc_pool, child, child->vd.tx.phys); - descs_put++; + for (descs_put = 0; descs_put < count; descs_put++) { + hw_desc = &desc->hw_desc[descs_put]; + dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp); } - dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys); - descs_put++; - + kfree(desc->hw_desc); + kfree(desc); atomic_sub(descs_put, &chan->descs_allocated); dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n", axi_chan_name(chan), descs_put, @@ -248,19 +266,41 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); - enum dma_status ret; + struct virt_dma_desc *vdesc; + enum dma_status status; + u32 completed_length; + unsigned long flags; + u32 completed_blocks; + size_t bytes = 0; + u32 length; + u32 len; - ret = dma_cookie_status(dchan, cookie, txstate); + status = dma_cookie_status(dchan, cookie, txstate); + if (status == DMA_COMPLETE || !txstate) + return status; - if (chan->is_paused && ret == DMA_IN_PROGRESS) - ret = DMA_PAUSED; + spin_lock_irqsave(&chan->vc.lock, flags); - return ret; + vdesc = vchan_find_desc(&chan->vc, cookie); + if (vdesc) { + length = vd_to_axi_desc(vdesc)->length; + completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks; + len = vd_to_axi_desc(vdesc)->hw_desc[0].len; + completed_length = completed_blocks * len; + bytes = length - completed_length; + } else { + bytes = vd_to_axi_desc(vdesc)->length; + } + + spin_unlock_irqrestore(&chan->vc.lock, flags); + dma_set_residue(txstate, bytes); + + return status; } -static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr) +static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr) { - desc->lli.llp = cpu_to_le64(adr); + desc->lli->llp = cpu_to_le64(adr); } static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr) @@ -268,6 +308,29 @@ static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr) axi_chan_iowrite64(chan, CH_LLP, adr); } +static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set) +{ + u32 offset = DMAC_APB_BYTE_WR_CH_EN; + u32 reg_width, val; + + if (!chan->chip->apb_regs) { + dev_dbg(chan->chip->dev, "apb_regs not initialized\n"); + return; + } + + reg_width = __ffs(chan->config.dst_addr_width); + if (reg_width == DWAXIDMAC_TRANS_WIDTH_16) + offset = DMAC_APB_HALFWORD_WR_CH_EN; + + val = ioread32(chan->chip->apb_regs + offset); + + if (set) + val |= BIT(chan->id); + else + val &= ~BIT(chan->id); + + iowrite32(val, chan->chip->apb_regs + offset); +} /* 
Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
@@ -293,9 +356,26 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+	switch (chan->direction) {
+	case DMA_MEM_TO_DEV:
+		dw_axi_dma_set_byte_halfword(chan, true);
+		reg |= (chan->config.device_fc ?
+			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
+			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
+			<< CH_CFG_H_TT_FC_POS;
+		break;
+	case DMA_DEV_TO_MEM:
+		reg |= (chan->config.device_fc ?
+			DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
+			DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
+			<< CH_CFG_H_TT_FC_POS;
+		break;
+	default:
+		break;
+	}
	axi_chan_iowrite32(chan, CH_CFG_H, reg);
-	write_chan_llp(chan, first->vd.tx.phys | lms);
+	write_chan_llp(chan, first->hw_desc[0].llp | lms);
	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);
@@ -333,6 +413,13 @@ static void dma_chan_issue_pending(struct dma_chan *dchan)
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
+static void dw_axi_dma_synchronize(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+	vchan_synchronize(&chan->vc);
+}
+
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
@@ -344,6 +431,15 @@ static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
		return -EBUSY;
	}
+	/* LLI address must be aligned to a 64-byte boundary */
+	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
+					  chan->chip->dev,
+					  sizeof(struct axi_dma_lli),
+					  64, 0);
+	if (!chan->desc_pool) {
+		dev_err(chan2dev(chan), "No memory for descriptors\n");
+		return -ENOMEM;
+	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
	pm_runtime_get(chan->chip->dev);
@@ -365,6 +461,8 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
	vchan_free_chan_resources(&chan->vc);
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
@@ -372,73 +470,398 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
	pm_runtime_put(chan->chip->dev);
}
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
+				      u32 handshake_num, bool set)
+{
+	unsigned long start = 0;
+	unsigned long reg_value;
+	unsigned long reg_mask;
+	unsigned long reg_set;
+	unsigned long mask;
+	unsigned long val;
+
+	if (!chip->apb_regs) {
+		dev_dbg(chip->dev, "apb_regs not initialized\n");
+		return;
+	}
+
+	/*
+	 * An unused DMA channel has a default value of 0x3F.
+	 * Lock a DMA channel by assigning a handshake number to it.
+	 * Unlock it by assigning 0x3F back to the channel.
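+	 * E.g. (illustrative value): binding handshake 5 scans the 64-bit
+	 * DMAC_APB_HW_HS_SEL_0 field for the 8-bit clump that still reads
+	 * 0x3F and rewrites that clump to 5; the 'false' call finds the
+	 * clump holding 5 and restores 0x3F.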
+ */ + if (set) { + reg_set = UNUSED_CHANNEL; + val = handshake_num; + } else { + reg_set = handshake_num; + val = UNUSED_CHANNEL; + } + + reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0); + + for_each_set_clump8(start, reg_mask, ®_value, 64) { + if (reg_mask == reg_set) { + mask = GENMASK_ULL(start + 7, start); + reg_value &= ~mask; + reg_value |= rol64(val, start); + lo_hi_writeq(reg_value, + chip->apb_regs + DMAC_APB_HW_HS_SEL_0); + break; + } + } +} + /* * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI * as 1, it understands that the current block is the final block in the * transfer and completes the DMA transfer operation at the end of current * block transfer. */ -static void set_desc_last(struct axi_dma_desc *desc) +static void set_desc_last(struct axi_dma_hw_desc *desc) { u32 val; - val = le32_to_cpu(desc->lli.ctl_hi); + val = le32_to_cpu(desc->lli->ctl_hi); val |= CH_CTL_H_LLI_LAST; - desc->lli.ctl_hi = cpu_to_le32(val); + desc->lli->ctl_hi = cpu_to_le32(val); } -static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr) +static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr) { - desc->lli.sar = cpu_to_le64(adr); + desc->lli->sar = cpu_to_le64(adr); } -static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr) +static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr) { - desc->lli.dar = cpu_to_le64(adr); + desc->lli->dar = cpu_to_le64(adr); } -static void set_desc_src_master(struct axi_dma_desc *desc) +static void set_desc_src_master(struct axi_dma_hw_desc *desc) { u32 val; /* Select AXI0 for source master */ - val = le32_to_cpu(desc->lli.ctl_lo); + val = le32_to_cpu(desc->lli->ctl_lo); val &= ~CH_CTL_L_SRC_MAST; - desc->lli.ctl_lo = cpu_to_le32(val); + desc->lli->ctl_lo = cpu_to_le32(val); } -static void set_desc_dest_master(struct axi_dma_desc *desc) +static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc, + struct axi_dma_desc *desc) { u32 val; /* Select AXI1 for source master if available */ - val = le32_to_cpu(desc->lli.ctl_lo); + val = le32_to_cpu(hw_desc->lli->ctl_lo); if (desc->chan->chip->dw->hdata->nr_masters > 1) val |= CH_CTL_L_DST_MAST; else val &= ~CH_CTL_L_DST_MAST; - desc->lli.ctl_lo = cpu_to_le32(val); + hw_desc->lli->ctl_lo = cpu_to_le32(val); +} + +static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, + struct axi_dma_hw_desc *hw_desc, + dma_addr_t mem_addr, size_t len) +{ + unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width); + unsigned int reg_width; + unsigned int mem_width; + dma_addr_t device_addr; + size_t axi_block_ts; + size_t block_ts; + u32 ctllo, ctlhi; + u32 burst_len; + + axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; + + mem_width = __ffs(data_width | mem_addr | len); + if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) + mem_width = DWAXIDMAC_TRANS_WIDTH_32; + + if (!IS_ALIGNED(mem_addr, 4)) { + dev_err(chan->chip->dev, "invalid buffer alignment\n"); + return -EINVAL; + } + + switch (chan->direction) { + case DMA_MEM_TO_DEV: + reg_width = __ffs(chan->config.dst_addr_width); + device_addr = chan->config.dst_addr; + ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS | + mem_width << CH_CTL_L_SRC_WIDTH_POS | + DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS | + DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS; + block_ts = len >> mem_width; + break; + case DMA_DEV_TO_MEM: + reg_width = __ffs(chan->config.src_addr_width); + device_addr = chan->config.src_addr; + ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS | + mem_width << 
CH_CTL_L_DST_WIDTH_POS | + DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | + DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS; + block_ts = len >> reg_width; + break; + default: + return -EINVAL; + } + + if (block_ts > axi_block_ts) + return -EINVAL; + + hw_desc->lli = axi_desc_get(chan, &hw_desc->llp); + if (unlikely(!hw_desc->lli)) + return -ENOMEM; + + ctlhi = CH_CTL_H_LLI_VALID; + + if (chan->chip->dw->hdata->restrict_axi_burst_len) { + burst_len = chan->chip->dw->hdata->axi_rw_burst_len; + ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN | + burst_len << CH_CTL_H_ARLEN_POS | + burst_len << CH_CTL_H_AWLEN_POS; + } + + hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi); + + if (chan->direction == DMA_MEM_TO_DEV) { + write_desc_sar(hw_desc, mem_addr); + write_desc_dar(hw_desc, device_addr); + } else { + write_desc_sar(hw_desc, device_addr); + write_desc_dar(hw_desc, mem_addr); + } + + hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); + + ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | + DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS; + hw_desc->lli->ctl_lo = cpu_to_le32(ctllo); + + set_desc_src_master(hw_desc); + + hw_desc->len = len; + return 0; +} + +static size_t calculate_block_len(struct axi_dma_chan *chan, + dma_addr_t dma_addr, size_t buf_len, + enum dma_transfer_direction direction) +{ + u32 data_width, reg_width, mem_width; + size_t axi_block_ts, block_len; + + axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; + + switch (direction) { + case DMA_MEM_TO_DEV: + data_width = BIT(chan->chip->dw->hdata->m_data_width); + mem_width = __ffs(data_width | dma_addr | buf_len); + if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) + mem_width = DWAXIDMAC_TRANS_WIDTH_32; + + block_len = axi_block_ts << mem_width; + break; + case DMA_DEV_TO_MEM: + reg_width = __ffs(chan->config.src_addr_width); + block_len = axi_block_ts << reg_width; + break; + default: + block_len = 0; + } + + return block_len; +} + +static struct dma_async_tx_descriptor * +dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); + struct axi_dma_hw_desc *hw_desc = NULL; + struct axi_dma_desc *desc = NULL; + dma_addr_t src_addr = dma_addr; + u32 num_periods, num_segments; + size_t axi_block_len; + u32 total_segments; + u32 segment_len; + unsigned int i; + int status; + u64 llp = 0; + u8 lms = 0; /* Select AXI0 master for LLI fetching */ + + num_periods = buf_len / period_len; + + axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction); + if (axi_block_len == 0) + return NULL; + + num_segments = DIV_ROUND_UP(period_len, axi_block_len); + segment_len = DIV_ROUND_UP(period_len, num_segments); + + total_segments = num_periods * num_segments; + + desc = axi_desc_alloc(total_segments); + if (unlikely(!desc)) + goto err_desc_get; + + chan->direction = direction; + desc->chan = chan; + chan->cyclic = true; + desc->length = 0; + desc->period_len = period_len; + + for (i = 0; i < total_segments; i++) { + hw_desc = &desc->hw_desc[i]; + + status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr, + segment_len); + if (status < 0) + goto err_desc_get; + + desc->length += hw_desc->len; + /* Set end-of-link to the linked descriptor, so that cyclic + * callback function can be triggered during interrupt. 
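+	 * E.g. (illustrative sizes): buf_len = 64 KiB, period_len = 16 KiB
+	 * and axi_block_len = 8 KiB give num_periods = 4 and num_segments
+	 * = 2; each of the eight LLIs raises an interrupt, and the
+	 * completion handler fires the period callback on every second one.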
+ */ + set_desc_last(hw_desc); + + src_addr += segment_len; + } + + llp = desc->hw_desc[0].llp; + + /* Managed transfer list */ + do { + hw_desc = &desc->hw_desc[--total_segments]; + write_desc_llp(hw_desc, llp | lms); + llp = hw_desc->llp; + } while (total_segments); + + dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); + +err_desc_get: + if (desc) + axi_desc_put(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); + struct axi_dma_hw_desc *hw_desc = NULL; + struct axi_dma_desc *desc = NULL; + u32 num_segments, segment_len; + unsigned int loop = 0; + struct scatterlist *sg; + size_t axi_block_len; + u32 len, num_sgs = 0; + unsigned int i; + dma_addr_t mem; + int status; + u64 llp = 0; + u8 lms = 0; /* Select AXI0 master for LLI fetching */ + + if (unlikely(!is_slave_direction(direction) || !sg_len)) + return NULL; + + mem = sg_dma_address(sgl); + len = sg_dma_len(sgl); + + axi_block_len = calculate_block_len(chan, mem, len, direction); + if (axi_block_len == 0) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) + num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); + + desc = axi_desc_alloc(num_sgs); + if (unlikely(!desc)) + goto err_desc_get; + + desc->chan = chan; + desc->length = 0; + chan->direction = direction; + + for_each_sg(sgl, sg, sg_len, i) { + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); + segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments); + + do { + hw_desc = &desc->hw_desc[loop++]; + status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len); + if (status < 0) + goto err_desc_get; + + desc->length += hw_desc->len; + len -= segment_len; + mem += segment_len; + } while (len >= segment_len); + } + + /* Set end-of-link to the last link descriptor of list */ + set_desc_last(&desc->hw_desc[num_sgs - 1]); + + /* Managed transfer list */ + do { + hw_desc = &desc->hw_desc[--num_sgs]; + write_desc_llp(hw_desc, llp | lms); + llp = hw_desc->llp; + } while (num_sgs); + + dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); + +err_desc_get: + if (desc) + axi_desc_put(desc); + + return NULL; } static struct dma_async_tx_descriptor * dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, dma_addr_t src_adr, size_t len, unsigned long flags) { - struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL; struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); size_t block_ts, max_block_ts, xfer_len; - u32 xfer_width, reg; + struct axi_dma_hw_desc *hw_desc = NULL; + struct axi_dma_desc *desc = NULL; + u32 xfer_width, reg, num; + u64 llp = 0; u8 lms = 0; /* Select AXI0 master for LLI fetching */ dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx", axi_chan_name(chan), &src_adr, &dst_adr, len, flags); max_block_ts = chan->chip->dw->hdata->block_size[chan->id]; + xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len); + num = DIV_ROUND_UP(len, max_block_ts << xfer_width); + desc = axi_desc_alloc(num); + if (unlikely(!desc)) + goto err_desc_get; + desc->chan = chan; + num = 0; + desc->length = 0; while (len) { xfer_len = len; + hw_desc = &desc->hw_desc[num]; 
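+		/*
+		 * Illustrative numbers: with max_block_ts = 4096 and
+		 * xfer_width = 2 (32-bit), each LLI moves at most
+		 * 4096 << 2 = 16 KiB, so the DIV_ROUND_UP() above sizes a
+		 * 40 KiB memcpy at three hw descriptors; this loop fills
+		 * them one xfer_len chunk at a time.
+		 */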
/* * Take care for the alignment. * Actually source and destination widths can be different, but @@ -457,13 +880,13 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, xfer_len = max_block_ts << xfer_width; } - desc = axi_desc_get(chan); - if (unlikely(!desc)) + hw_desc->lli = axi_desc_get(chan, &hw_desc->llp); + if (unlikely(!hw_desc->lli)) goto err_desc_get; - write_desc_sar(desc, src_adr); - write_desc_dar(desc, dst_adr); - desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1); + write_desc_sar(hw_desc, src_adr); + write_desc_dar(hw_desc, dst_adr); + hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); reg = CH_CTL_H_LLI_VALID; if (chan->chip->dw->hdata->restrict_axi_burst_len) { @@ -474,7 +897,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, CH_CTL_H_AWLEN_EN | burst_len << CH_CTL_H_AWLEN_POS); } - desc->lli.ctl_hi = cpu_to_le32(reg); + hw_desc->lli->ctl_hi = cpu_to_le32(reg); reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS | @@ -482,62 +905,68 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, xfer_width << CH_CTL_L_SRC_WIDTH_POS | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS); - desc->lli.ctl_lo = cpu_to_le32(reg); + hw_desc->lli->ctl_lo = cpu_to_le32(reg); - set_desc_src_master(desc); - set_desc_dest_master(desc); - - /* Manage transfer list (xfer_list) */ - if (!first) { - first = desc; - } else { - list_add_tail(&desc->xfer_list, &first->xfer_list); - write_desc_llp(prev, desc->vd.tx.phys | lms); - } - prev = desc; + set_desc_src_master(hw_desc); + set_desc_dest_master(hw_desc, desc); + hw_desc->len = xfer_len; + desc->length += hw_desc->len; /* update the length and addresses for the next loop cycle */ len -= xfer_len; dst_adr += xfer_len; src_adr += xfer_len; + num++; } - /* Total len of src/dest sg == 0, so no descriptor were allocated */ - if (unlikely(!first)) - return NULL; - /* Set end-of-link to the last link descriptor of list */ - set_desc_last(desc); + set_desc_last(&desc->hw_desc[num - 1]); + /* Managed transfer list */ + do { + hw_desc = &desc->hw_desc[--num]; + write_desc_llp(hw_desc, llp | lms); + llp = hw_desc->llp; + } while (num); - return vchan_tx_prep(&chan->vc, &first->vd, flags); + return vchan_tx_prep(&chan->vc, &desc->vd, flags); err_desc_get: - if (first) - axi_desc_put(first); + if (desc) + axi_desc_put(desc); return NULL; } +static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + + return 0; +} + static void axi_chan_dump_lli(struct axi_dma_chan *chan, - struct axi_dma_desc *desc) + struct axi_dma_hw_desc *desc) { dev_err(dchan2dev(&chan->vc.chan), "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x", - le64_to_cpu(desc->lli.sar), - le64_to_cpu(desc->lli.dar), - le64_to_cpu(desc->lli.llp), - le32_to_cpu(desc->lli.block_ts_lo), - le32_to_cpu(desc->lli.ctl_hi), - le32_to_cpu(desc->lli.ctl_lo)); + le64_to_cpu(desc->lli->sar), + le64_to_cpu(desc->lli->dar), + le64_to_cpu(desc->lli->llp), + le32_to_cpu(desc->lli->block_ts_lo), + le32_to_cpu(desc->lli->ctl_hi), + le32_to_cpu(desc->lli->ctl_lo)); } static void axi_chan_list_dump_lli(struct axi_dma_chan *chan, struct axi_dma_desc *desc_head) { - struct axi_dma_desc *desc; + int count = atomic_read(&chan->descs_allocated); + int i; - axi_chan_dump_lli(chan, 
desc_head); - list_for_each_entry(desc, &desc_head->xfer_list, xfer_list) - axi_chan_dump_lli(chan, desc); + for (i = 0; i < count; i++) + axi_chan_dump_lli(chan, &desc_head->hw_desc[i]); } static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) @@ -570,8 +999,13 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) { + int count = atomic_read(&chan->descs_allocated); + struct axi_dma_hw_desc *hw_desc; + struct axi_dma_desc *desc; struct virt_dma_desc *vd; unsigned long flags; + u64 llp; + int i; spin_lock_irqsave(&chan->vc.lock, flags); if (unlikely(axi_chan_is_hw_enable(chan))) { @@ -582,12 +1016,34 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) /* The completed descriptor currently is in the head of vc list */ vd = vchan_next_desc(&chan->vc); - /* Remove the completed descriptor from issued list before completing */ - list_del(&vd->node); - vchan_cookie_complete(vd); - /* Submit queued descriptors after processing the completed ones */ - axi_chan_start_first_queued(chan); + if (chan->cyclic) { + desc = vd_to_axi_desc(vd); + if (desc) { + llp = lo_hi_readq(chan->chan_regs + CH_LLP); + for (i = 0; i < count; i++) { + hw_desc = &desc->hw_desc[i]; + if (hw_desc->llp == llp) { + axi_chan_irq_clear(chan, hw_desc->lli->status_lo); + hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID; + desc->completed_blocks = i; + + if (((hw_desc->len * (i + 1)) % desc->period_len) == 0) + vchan_cyclic_callback(vd); + break; + } + } + + axi_chan_enable(chan); + } + } else { + /* Remove the completed descriptor from issued list before completing */ + list_del(&vd->node); + vchan_cookie_complete(vd); + + /* Submit queued descriptors after processing the completed ones */ + axi_chan_start_first_queued(chan); + } spin_unlock_irqrestore(&chan->vc.lock, flags); } @@ -627,15 +1083,31 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) static int dma_chan_terminate_all(struct dma_chan *dchan) { struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); + u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT; unsigned long flags; + u32 val; + int ret; LIST_HEAD(head); - spin_lock_irqsave(&chan->vc.lock, flags); - axi_chan_disable(chan); + ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val, + !(val & chan_active), 1000, 10000); + if (ret == -ETIMEDOUT) + dev_warn(dchan2dev(dchan), + "%s failed to stop\n", axi_chan_name(chan)); + + if (chan->direction != DMA_MEM_TO_MEM) + dw_axi_dma_set_hw_channel(chan->chip, + chan->hw_handshake_num, false); + if (chan->direction == DMA_MEM_TO_DEV) + dw_axi_dma_set_byte_halfword(chan, false); + + spin_lock_irqsave(&chan->vc.lock, flags); + vchan_get_all_descriptors(&chan->vc, &head); + chan->cyclic = false; spin_unlock_irqrestore(&chan->vc.lock, flags); vchan_dma_desc_free_list(&chan->vc, &head); @@ -746,6 +1218,22 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev) return axi_dma_resume(chip); } +static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dw_axi_dma *dw = ofdma->of_dma_data; + struct axi_dma_chan *chan; + struct dma_chan *dchan; + + dchan = dma_get_any_slave_channel(&dw->dma); + if (!dchan) + return NULL; + + chan = dchan_to_axi_dma_chan(dchan); + chan->hw_handshake_num = dma_spec->args[0]; + return dchan; +} + static int parse_device_properties(struct axi_dma_chip *chip) { struct device *dev = chip->dev; @@ -816,6 +1304,7 @@ static 
int parse_device_properties(struct axi_dma_chip *chip)
static int dw_probe(struct platform_device *pdev)
{
+	struct device_node *node = pdev->dev.of_node;
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
@@ -848,6 +1337,12 @@ static int dw_probe(struct platform_device *pdev)
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);
+	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
+		if (IS_ERR(chip->apb_regs))
+			return PTR_ERR(chip->apb_regs);
+	}
+
	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);
@@ -870,13 +1365,6 @@ static int dw_probe(struct platform_device *pdev)
	if (ret)
		return ret;
-	/* Lli address must be aligned to a 64-byte boundary */
-	dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
-					 sizeof(struct axi_dma_desc), 64, 0);
-	if (!dw->desc_pool) {
-		dev_err(chip->dev, "No memory for descriptors dma pool\n");
-		return -ENOMEM;
-	}
	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
@@ -893,13 +1381,16 @@ static int dw_probe(struct platform_device *pdev)
	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
-	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
@@ -912,7 +1403,18 @@ static int dw_probe(struct platform_device *pdev)
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
+	dw->dma.device_synchronize = dw_axi_dma_synchronize;
+	dw->dma.device_config = dw_axi_dma_chan_slave_config;
+	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
+	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
+	/*
+	 * The Synopsys DesignWare AXI DMA datasheet specifies a maximum of
+	 * 1024 supported blocks, and the device register width is 4 bytes.
+	 * Therefore, constrain the maximum segment size to 1024 * 4.
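+	 * (1024 * 4 = 4096 = 0x1000, which is the MAX_BLOCK_SIZE value
+	 * defined in dw-axi-dmac.h and passed to dma_set_max_seg_size()
+	 * below.)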
+ */ + dw->dma.dev->dma_parms = &dw->dma_parms; + dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE); platform_set_drvdata(pdev, chip); pm_runtime_enable(chip->dev); @@ -935,6 +1437,13 @@ static int dw_probe(struct platform_device *pdev) if (ret) goto err_pm_disable; + /* Register with OF helpers for DMA lookups */ + ret = of_dma_controller_register(pdev->dev.of_node, + dw_axi_dma_of_xlate, dw); + if (ret < 0) + dev_warn(&pdev->dev, + "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n"); + dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n", dw->hdata->nr_channels); @@ -968,6 +1477,8 @@ static int dw_remove(struct platform_device *pdev) devm_free_irq(chip->dev, chip->irq, chip); + of_dma_controller_free(chip->dev->of_node); + list_for_each_entry_safe(chan, _chan, &dw->dma.channels, vc.chan.device_node) { list_del(&chan->vc.chan.device_node); @@ -983,6 +1494,7 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = { static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,axi-dma-1.01a" }, + { .compatible = "intel,kmb-axi-dma" }, {} }; MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index 18b6014cf9b4..b69897887c76 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -37,10 +37,16 @@ struct axi_dma_chan { struct axi_dma_chip *chip; void __iomem *chan_regs; u8 id; + u8 hw_handshake_num; atomic_t descs_allocated; + struct dma_pool *desc_pool; struct virt_dma_chan vc; + struct axi_dma_desc *desc; + struct dma_slave_config config; + enum dma_transfer_direction direction; + bool cyclic; /* these other elements are all protected by vc.lock */ bool is_paused; }; @@ -48,7 +54,7 @@ struct axi_dma_chan { struct dw_axi_dma { struct dma_device dma; struct dw_axi_dma_hcfg *hdata; - struct dma_pool *desc_pool; + struct device_dma_parameters dma_parms; /* channels */ struct axi_dma_chan *chan; @@ -58,6 +64,7 @@ struct axi_dma_chip { struct device *dev; int irq; void __iomem *regs; + void __iomem *apb_regs; struct clk *core_clk; struct clk *cfgr_clk; struct dw_axi_dma *dw; @@ -80,12 +87,20 @@ struct __packed axi_dma_lli { __le32 reserved_hi; }; +struct axi_dma_hw_desc { + struct axi_dma_lli *lli; + dma_addr_t llp; + u32 len; +}; + struct axi_dma_desc { - struct axi_dma_lli lli; + struct axi_dma_hw_desc *hw_desc; struct virt_dma_desc vd; struct axi_dma_chan *chan; - struct list_head xfer_list; + u32 completed_blocks; + u32 length; + u32 period_len; }; static inline struct device *dchan2dev(struct dma_chan *dchan) @@ -157,6 +172,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define CH_INTSIGNAL_ENA 0x090 /* R/W Chan Interrupt Signal Enable */ #define CH_INTCLEAR 0x098 /* W Chan Interrupt Clear */ +/* These Apb registers are used by Intel KeemBay SoC */ +#define DMAC_APB_CFG 0x000 /* DMAC Apb Configuration Register */ +#define DMAC_APB_STAT 0x004 /* DMAC Apb Status Register */ +#define DMAC_APB_DEBUG_STAT_0 0x008 /* DMAC Apb Debug Status Register 0 */ +#define DMAC_APB_DEBUG_STAT_1 0x00C /* DMAC Apb Debug Status Register 1 */ +#define DMAC_APB_HW_HS_SEL_0 0x010 /* DMAC Apb HW HS register 0 */ +#define DMAC_APB_HW_HS_SEL_1 0x014 /* DMAC Apb HW HS register 1 */ +#define DMAC_APB_LPI 0x018 /* DMAC Apb Low Power Interface Reg */ +#define DMAC_APB_BYTE_WR_CH_EN 0x01C /* DMAC Apb Byte Write Enable */ +#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */ + +#define 
UNUSED_CHANNEL	0x3F /* Set unused DMA channel to 0x3F */
+#define MAX_BLOCK_SIZE	0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
#define DMAC_EN_POS	0
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0feb323bae1e..f8459cc5315d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
+	unsigned int i;
	int err;
	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
	return 0;
out_free_fdev:
+	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+		if (fdev->chan[i])
+			fsl_dma_chan_remove(fdev->chan[i]);
+	}
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
+	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
	kfree(fdev);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 07cc7320a614..9045a6f7f589 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -26,22 +26,12 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
	struct hsu_dma_chip *chip = dev;
-	struct pci_dev *pdev = to_pci_dev(chip->dev);
	u32 dmaisr;
	u32 status;
	unsigned short i;
	int ret = 0;
	int err;
-	/*
-	 * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
-	 * to have different numbers, is shared between HSU DMA and UART IPs.
-	 * Thus on such SoCs we are expecting that IRQ handler is called in
-	 * UART driver only.
-	 */
-	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
-		return IRQ_HANDLED;
-
	dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
	for (i = 0; i < chip->hsu->nr_channels; i++) {
		if (dmaisr & 0x1) {
@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (ret)
		goto err_register_irq;
+	/*
+	 * On Intel Tangier B0 and Anniedale the interrupt line, despite
+	 * having different numbers, is shared between the HSU DMA and UART
+	 * IPs, so on those SoCs the IRQ handler is expected to be invoked
+	 * from the UART driver only. Rather than handling the spurious HSU
+	 * DMA interrupt here, wasting CPU time and delaying the HSU UART
+	 * interrupt handling, disable the interrupt entirely.
+ */ + if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) + disable_irq_nosync(chip->irq); + pci_set_drvdata(pdev, chip); return 0; diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 71fd6e4c42cd..a15e50126434 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd) INIT_LIST_HEAD(&dma->channels); dma->dev = &idxd->pdev->dev; + dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index fa04acd5582a..085a0c3b62c6 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -26,12 +26,16 @@ MODULE_VERSION(IDXD_DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); +static bool sva = true; +module_param(sva, bool, 0644); +MODULE_PARM_DESC(sva, "Toggle SVA support on/off"); + #define DRV_NAME "idxd" bool support_enqcmd; static struct idr idxd_idrs[IDXD_TYPE_MAX]; -static struct mutex idxd_idr_lock; +static DEFINE_MUTEX(idxd_idr_lock); static struct pci_device_id idxd_pci_tbl[] = { /* DSA ver 1.0 platforms */ @@ -341,12 +345,14 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); - if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) { + if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { rc = idxd_enable_system_pasid(idxd); if (rc < 0) dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); else set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); + } else if (!sva) { + dev_warn(dev, "User forced SVA off via module param.\n"); } idxd_read_caps(idxd); @@ -547,7 +553,6 @@ static int __init idxd_init_module(void) else support_enqcmd = true; - mutex_init(&idxd_idr_lock); for (i = 0; i < IDXD_TYPE_MAX; i++) idr_init(&idxd_idrs[i]); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 41ba21eea7c8..d5590c08db51 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1952,8 +1952,6 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, static int sdma_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = - of_match_device(sdma_dt_ids, &pdev->dev); struct device_node *np = pdev->dev.of_node; struct device_node *spba_bus; const char *fw_name; @@ -1961,17 +1959,9 @@ static int sdma_probe(struct platform_device *pdev) int irq; struct resource *iores; struct resource spba_res; - struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); int i; struct sdma_engine *sdma; s32 *saddr_arr; - const struct sdma_driver_data *drvdata = NULL; - - drvdata = of_id->data; - if (!drvdata) { - dev_err(&pdev->dev, "unable to find driver data\n"); - return -EINVAL; - } ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) @@ -1984,7 +1974,7 @@ static int sdma_probe(struct platform_device *pdev) spin_lock_init(&sdma->channel_0_lock); sdma->dev = &pdev->dev; - sdma->drvdata = drvdata; + sdma->drvdata = of_device_get_match_data(sdma->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -2063,8 +2053,6 @@ static int sdma_probe(struct platform_device *pdev) if (sdma->drvdata->script_addrs) sdma_add_scripts(sdma, sdma->drvdata->script_addrs); - if (pdata && pdata->script_addrs) - sdma_add_scripts(sdma, pdata->script_addrs); sdma->dma_device.dev = &pdev->dev; @@ -2110,30 +2098,18 @@ static int sdma_probe(struct platform_device *pdev) } /* - * Kick off firmware loading as the very last step: - * attempt to load firmware only if we're not 
on the error path, because
-	 * the firmware callback requires a fully functional and allocated sdma
-	 * instance.
+	 * Because the device tree does not encode the ROM script address,
+	 * the RAM script in the firmware is mandatory for device-tree
+	 * probe; without it the probe fails.
	 */
-	if (pdata) {
-		ret = sdma_get_firmware(sdma, pdata->fw_name);
-		if (ret)
-			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+				      &fw_name);
+	if (ret) {
+		dev_warn(&pdev->dev, "failed to get firmware name\n");
	} else {
-		/*
-		 * Because that device tree does not encode ROM script address,
-		 * the RAM script in firmware is mandatory for device tree
-		 * probe, otherwise it fails.
-		 */
-		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
-					      &fw_name);
-		if (ret) {
-			dev_warn(&pdev->dev, "failed to get firmware name\n");
-		} else {
-			ret = sdma_get_firmware(sdma, fw_name);
-			if (ret)
-				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
-		}
+		ret = sdma_get_firmware(sdma, fw_name);
+		if (ret)
+			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
	}
	return 0;
diff --git a/drivers/dma/lgm/Kconfig b/drivers/dma/lgm/Kconfig
new file mode 100644
index 000000000000..9194330ed0f2
--- /dev/null
+++ b/drivers/dma/lgm/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTEL_LDMA
+	bool "Lightning Mountain centralized DMA controllers"
+	depends on X86 || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for Intel Lightning Mountain SOC DMA controllers.
+	  These controllers provide DMA capabilities for a variety of on-chip
+	  devices such as HSNAND and GSWIP (Gigabit Switch IP).
diff --git a/drivers/dma/lgm/Makefile b/drivers/dma/lgm/Makefile
new file mode 100644
index 000000000000..f318a8eff464
--- /dev/null
+++ b/drivers/dma/lgm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INTEL_LDMA) += lgm-dma.o
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
new file mode 100644
index 000000000000..efe8bd3a0e2a
--- /dev/null
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lightning Mountain centralized DMA controller driver
+ *
+ * Copyright (c) 2016 - 2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define DRIVER_NAME "lgm-dma"
+
+#define DMA_ID			0x0008
+#define DMA_ID_REV		GENMASK(7, 0)
+#define DMA_ID_PNR		GENMASK(19, 16)
+#define DMA_ID_CHNR		GENMASK(26, 20)
+#define DMA_ID_DW_128B		BIT(27)
+#define DMA_ID_AW_36B		BIT(28)
+#define DMA_VER32		0x32
+#define DMA_VER31		0x31
+#define DMA_VER22		0x0A
+
+#define DMA_CTRL		0x0010
+#define DMA_CTRL_RST		BIT(0)
+#define DMA_CTRL_DSRAM_PATH	BIT(1)
+#define DMA_CTRL_DBURST_WR	BIT(3)
+#define DMA_CTRL_VLD_DF_ACK	BIT(4)
+#define DMA_CTRL_CH_FL		BIT(6)
+#define DMA_CTRL_DS_FOD		BIT(7)
+#define DMA_CTRL_DRB		BIT(8)
+#define DMA_CTRL_ENBE		BIT(9)
+#define DMA_CTRL_DESC_TMOUT_CNT_V31	GENMASK(27, 16)
+#define DMA_CTRL_DESC_TMOUT_EN_V31	BIT(30)
+#define DMA_CTRL_PKTARB		BIT(31)
+
+#define DMA_CPOLL		0x0014
+#define DMA_CPOLL_CNT		GENMASK(15, 4)
+#define DMA_CPOLL_EN		BIT(31)
+
+#define DMA_CS			0x0018
+#define DMA_CS_MASK		GENMASK(5, 0)
+
+#define DMA_CCTRL		0x001C
+#define DMA_CCTRL_ON		BIT(0)
+#define DMA_CCTRL_RST		BIT(1)
+#define DMA_CCTRL_CH_POLL_EN	BIT(2)
+#define DMA_CCTRL_CH_ABC	BIT(3) /* Adaptive Burst Chop */
+#define DMA_CDBA_MSB		GENMASK(7, 4)
+#define DMA_CCTRL_DIR_TX	BIT(8)
+#define DMA_CCTRL_CLASS		GENMASK(11, 9)
+#define DMA_CCTRL_CLASSH	GENMASK(19, 18)
+#define DMA_CCTRL_WR_NP_EN	BIT(21)
+#define DMA_CCTRL_PDEN		BIT(23)
+#define DMA_MAX_CLASS		(SZ_32 - 1)
+
+#define DMA_CDBA		0x0020
+#define DMA_CDLEN		0x0024
+#define DMA_CIS			0x0028
+#define DMA_CIE			0x002C
+#define DMA_CI_EOP		BIT(1)
+#define DMA_CI_DUR		BIT(2)
+#define DMA_CI_DESCPT		BIT(3)
+#define DMA_CI_CHOFF		BIT(4)
+#define DMA_CI_RDERR		BIT(5)
+#define DMA_CI_ALL \
+	(DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
+
+#define DMA_PS			0x0040
+#define DMA_PCTRL		0x0044
+#define DMA_PCTRL_RXBL16	BIT(0)
+#define DMA_PCTRL_TXBL16	BIT(1)
+#define DMA_PCTRL_RXBL		GENMASK(3, 2)
+#define DMA_PCTRL_RXBL_8	3
+#define DMA_PCTRL_TXBL		GENMASK(5, 4)
+#define DMA_PCTRL_TXBL_8	3
+#define DMA_PCTRL_PDEN		BIT(6)
+#define DMA_PCTRL_RXBL32	BIT(7)
+#define DMA_PCTRL_RXENDI	GENMASK(9, 8)
+#define DMA_PCTRL_TXENDI	GENMASK(11, 10)
+#define DMA_PCTRL_TXBL32	BIT(15)
+#define DMA_PCTRL_MEM_FLUSH	BIT(16)
+
+#define DMA_IRNEN1		0x00E8
+#define DMA_IRNCR1		0x00EC
+#define DMA_IRNEN		0x00F4
+#define DMA_IRNCR		0x00F8
+#define DMA_C_DP_TICK		0x100
+#define DMA_C_DP_TICK_TIKNARB	GENMASK(15, 0)
+#define DMA_C_DP_TICK_TIKARB	GENMASK(31, 16)
+
+#define DMA_C_HDRM		0x110
+/*
+ * If header mode is set in the DMA descriptor:
+ *   If bit 30 is disabled, HDR_LEN must be configured according to the
+ *   channel's requirements.
+ *   If bit 30 is enabled (checksum with header mode), HDR_LEN does not need
+ *   to be configured; it enables checksumming for the switch instead.
+ * If header mode is not set in the DMA descriptor, this register setting
+ * does not matter.
+ */
+#define DMA_C_HDRM_HDR_SUM	BIT(30)
+
+#define DMA_C_BOFF		0x120
+#define DMA_C_BOFF_BOF_LEN	GENMASK(7, 0)
+#define DMA_C_BOFF_EN		BIT(31)
+
+#define DMA_ORRC		0x190
+#define DMA_ORRC_ORRCNT		GENMASK(8, 4)
+#define DMA_ORRC_EN		BIT(31)
+
+#define DMA_C_ENDIAN		0x200
+#define DMA_C_END_DATAENDI	GENMASK(1, 0)
+#define DMA_C_END_DE_EN		BIT(7)
+#define DMA_C_END_DESENDI	GENMASK(9, 8)
+#define DMA_C_END_DES_EN	BIT(16)
+
+/* DMA controller capability */
+#define DMA_ADDR_36BIT		BIT(0)
+#define DMA_DATA_128BIT		BIT(1)
+#define DMA_CHAN_FLOW_CTL	BIT(2)
+#define DMA_DESC_FOD		BIT(3)
+#define DMA_DESC_IN_SRAM	BIT(4)
+#define DMA_EN_BYTE_EN		BIT(5)
+#define DMA_DBURST_WR		BIT(6)
+#define DMA_VALID_DESC_FETCH_ACK	BIT(7)
+#define DMA_DFT_DRB		BIT(8)
+
+#define DMA_ORRC_MAX_CNT	(SZ_32 - 1)
+#define DMA_DFT_POLL_CNT	SZ_4
+#define DMA_DFT_BURST_V22	SZ_2
+#define DMA_BURSTL_8DW		SZ_8
+#define DMA_BURSTL_16DW		SZ_16
+#define DMA_BURSTL_32DW		SZ_32
+#define DMA_DFT_BURST		DMA_BURSTL_16DW
+#define DMA_MAX_DESC_NUM	(SZ_8K - 1)
+#define DMA_CHAN_BOFF_MAX	(SZ_256 - 1)
+#define DMA_DFT_ENDIAN		0
+
+#define DMA_DFT_DESC_TCNT	50
+#define DMA_HDR_LEN_MAX		(SZ_16K - 1)
+
+/* DMA flags */
+#define DMA_TX_CH		BIT(0)
+#define DMA_RX_CH		BIT(1)
+#define DEVICE_ALLOC_DESC	BIT(2)
+#define CHAN_IN_USE		BIT(3)
+#define DMA_HW_DESC		BIT(4)
+
+/* Descriptor fields */
+#define DESC_DATA_LEN		GENMASK(15, 0)
+#define DESC_BYTE_OFF		GENMASK(25, 23)
+#define DESC_EOP		BIT(28)
+#define DESC_SOP		BIT(29)
+#define DESC_C			BIT(30)
+#define DESC_OWN		BIT(31)
+
+#define DMA_CHAN_RST		1
+#define DMA_MAX_SIZE		(BIT(16) - 1)
+#define MAX_LOWER_CHANS		32
+#define MASK_LOWER_CHANS	GENMASK(4, 0)
+#define DMA_OWN			1
+#define HIGH_4_BITS		GENMASK(3, 0)
+#define DMA_DFT_DESC_NUM	1
+#define DMA_PKT_DROP_DIS	0
+
+enum ldma_chan_on_off {
+	DMA_CH_OFF = 0,
+	DMA_CH_ON = 1,
+};
+
+enum {
+	DMA_TYPE_TX = 0,
+	DMA_TYPE_RX,
+	DMA_TYPE_MCPY,
+};
+
+struct ldma_dev;
+struct ldma_port;
+
+struct ldma_chan {
+	struct virt_dma_chan	vchan;
+	struct ldma_port	*port; /* back pointer */
+	char			name[8]; /* Channel name */
+	int			nr; /* Channel id in hardware */
+	u32			flags; /* central way or channel based way */
+	enum ldma_chan_on_off	onoff;
+	dma_addr_t		desc_phys;
+	void			*desc_base; /* Virtual address */
+	u32			desc_cnt; /* Number of descriptors */
+	int			rst;
+	u32			hdrm_len;
+	bool			hdrm_csum;
+	u32			boff_len;
+	u32			data_endian;
+	u32			desc_endian;
+	bool			pden;
+	bool			desc_rx_np;
+	bool			data_endian_en;
+	bool			desc_endian_en;
+	bool			abc_en;
+	bool			desc_init;
+	struct dma_pool		*desc_pool; /* Descriptors pool */
+	u32			desc_num;
+	struct dw2_desc_sw	*ds;
+	struct work_struct	work;
+	struct dma_slave_config config;
+};
+
+struct ldma_port {
+	struct ldma_dev		*ldev; /* back pointer */
+	u32			portid;
+	u32			rxbl;
+	u32			txbl;
+	u32			rxendi;
+	u32			txendi;
+	u32			pkt_drop;
+};
+
+/* Instance specific data */
+struct ldma_inst_data {
+	bool			desc_in_sram;
+	bool			chan_fc;
+	bool			desc_fod; /* Fetch On Demand */
+	bool			valid_desc_fetch_ack;
+	u32			orrc; /* Outstanding read count */
+	const char		*name;
+	u32			type;
+};
+
+struct ldma_dev {
+	struct device		*dev;
+	void __iomem		*base;
+	struct reset_control	*rst;
+	struct clk		*core_clk;
+	struct dma_device	dma_dev;
+	u32			ver;
+	int			irq;
+	struct ldma_port	*ports;
+	struct ldma_chan	*chans; /* channel list on this DMA or port */
+	spinlock_t		dev_lock; /* Controller register exclusive */
+	u32			chan_nrs;
+	u32			port_nrs;
+	u32			channels_mask;
+	u32 
flags; + u32 pollcnt; + const struct ldma_inst_data *inst; + struct workqueue_struct *wq; +}; + +struct dw2_desc { + u32 field; + u32 addr; +} __packed __aligned(8); + +struct dw2_desc_sw { + struct virt_dma_desc vdesc; + struct ldma_chan *chan; + dma_addr_t desc_phys; + size_t desc_cnt; + size_t size; + struct dw2_desc *desc_hw; +}; + +static inline void +ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs) +{ + u32 old_val, new_val; + + old_val = readl(d->base + ofs); + new_val = (old_val & ~mask) | (val & mask); + + if (new_val != old_val) + writel(new_val, d->base + ofs); +} + +static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct ldma_chan, vchan.chan); +} + +static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev) +{ + return container_of(dma_dev, struct ldma_dev, dma_dev); +} + +static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct dw2_desc_sw, vdesc); +} + +static inline bool ldma_chan_tx(struct ldma_chan *c) +{ + return !!(c->flags & DMA_TX_CH); +} + +static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c) +{ + return !!(c->flags & DMA_HW_DESC); +} + +static void ldma_dev_reset(struct ldma_dev *d) + +{ + unsigned long flags; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask = DMA_CTRL_PKTARB; + u32 val = enable ? DMA_CTRL_PKTARB : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask = DMA_CTRL_DSRAM_PATH; + u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask, val; + + if (d->inst->type != DMA_TYPE_TX) + return; + + mask = DMA_CTRL_CH_FL; + val = enable ? DMA_CTRL_CH_FL : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_global_polling_enable(struct ldma_dev *d) +{ + unsigned long flags; + u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT; + u32 val = DMA_CPOLL_EN; + + val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt); + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CPOLL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask, val; + + if (d->inst->type == DMA_TYPE_MCPY) + return; + + mask = DMA_CTRL_DS_FOD; + val = enable ? DMA_CTRL_DS_FOD : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask = DMA_CTRL_ENBE; + u32 val = enable ? 
DMA_CTRL_ENBE : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_orrc_cfg(struct ldma_dev *d) +{ + unsigned long flags; + u32 val = 0; + u32 mask; + + if (d->inst->type == DMA_TYPE_RX) + return; + + mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT; + if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT) + val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc); + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_ORRC); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt) +{ + u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31; + unsigned long flags; + u32 val; + + if (enable) + val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt); + else + val = 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask, val; + + if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY) + return; + + mask = DMA_CTRL_DBURST_WR; + val = enable ? DMA_CTRL_DBURST_WR : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable) +{ + unsigned long flags; + u32 mask, val; + + if (d->inst->type != DMA_TYPE_TX) + return; + + mask = DMA_CTRL_VLD_DF_ACK; + val = enable ? DMA_CTRL_VLD_DF_ACK : 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, mask, val, DMA_CTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable) +{ + unsigned long flags; + u32 mask = DMA_CTRL_DRB; + u32 val = enable ? 
DMA_CTRL_DRB : 0;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	ldma_update_bits(d, mask, val, DMA_CTRL);
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static int ldma_dev_cfg(struct ldma_dev *d)
+{
+	bool enable;
+
+	ldma_dev_pkt_arb_cfg(d, true);
+	ldma_dev_global_polling_enable(d);
+
+	enable = !!(d->flags & DMA_DFT_DRB);
+	ldma_dev_drb_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_EN_BYTE_EN);
+	ldma_dev_byte_enable_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
+	ldma_dev_chan_flow_ctl_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_DESC_FOD);
+	ldma_dev_desc_fetch_on_demand_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_DESC_IN_SRAM);
+	ldma_dev_sram_desc_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_DBURST_WR);
+	ldma_dev_dburst_wr_cfg(d, enable);
+
+	enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
+	ldma_dev_vld_fetch_ack_cfg(d, enable);
+
+	if (d->ver > DMA_VER22) {
+		ldma_dev_orrc_cfg(d);
+		ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
+	}
+
+	dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
+		d->inst->name, readl(d->base + DMA_CTRL));
+
+	return 0;
+}
+
+static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 class_low, class_high;
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	reg = readl(d->base + DMA_CCTRL);
+	/* Read from hardware */
+	if (reg & DMA_CCTRL_DIR_TX)
+		c->flags |= DMA_TX_CH;
+	else
+		c->flags |= DMA_RX_CH;
+
+	/* Keep the class value unchanged */
+	class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
+	class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
+	val &= ~DMA_CCTRL_CLASS;
+	val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
+	val &= ~DMA_CCTRL_CLASSH;
+	val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
+	writel(val, d->base + DMA_CCTRL);
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+
+	return 0;
+}
+
+static void ldma_chan_irq_init(struct ldma_chan *c)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+	u32 enofs, crofs;
+	u32 cn_bit;
+
+	if (c->nr < MAX_LOWER_CHANS) {
+		enofs = DMA_IRNEN;
+		crofs = DMA_IRNCR;
+	} else {
+		enofs = DMA_IRNEN1;
+		crofs = DMA_IRNCR1;
+	}
+
+	cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
+	spin_lock_irqsave(&d->dev_lock, flags);
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+
+	/* Disable all channel interrupts and clear any pending status */
+	writel(0, d->base + DMA_CIE);
+	writel(DMA_CI_ALL, d->base + DMA_CIS);
+
+	ldma_update_bits(d, cn_bit, 0, enofs);
+	writel(cn_bit, d->base + crofs);
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 class_val;
+
+	if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
+		return;
+
+	/* 3 bits low */
+	class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
+	/* 2 bits high */
+	class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);
+
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
+			 DMA_CCTRL);
+}
+
+static int ldma_chan_on(struct ldma_chan *c)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+
+	/* Do not allow a channel to be turned on before its descriptors are configured */
+	if (WARN_ON(!c->desc_init))
+		return -EINVAL;
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, 
DMA_CCTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); + + c->onoff = DMA_CH_ON; + + return 0; +} + +static int ldma_chan_off(struct ldma_chan *c) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + unsigned long flags; + u32 val; + int ret; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); + ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); + + ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val, + !(val & DMA_CCTRL_ON), 0, 10000); + if (ret) + return ret; + + c->onoff = DMA_CH_OFF; + + return 0; +} + +static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base, + int desc_num) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + unsigned long flags; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); + writel(lower_32_bits(desc_base), d->base + DMA_CDBA); + + /* Higher 4 bits of 36 bit addressing */ + if (IS_ENABLED(CONFIG_64BIT)) { + u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS; + + ldma_update_bits(d, DMA_CDBA_MSB, + FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL); + } + writel(desc_num, d->base + DMA_CDLEN); + spin_unlock_irqrestore(&d->dev_lock, flags); + + c->desc_init = true; +} + +static struct dma_async_tx_descriptor * +ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num) +{ + struct ldma_chan *c = to_ldma_chan(chan); + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + struct dma_async_tx_descriptor *tx; + struct dw2_desc_sw *ds; + + if (!desc_num) { + dev_err(d->dev, "Channel %d must allocate descriptor first\n", + c->nr); + return NULL; + } + + if (desc_num > DMA_MAX_DESC_NUM) { + dev_err(d->dev, "Channel %d descriptor number out of range %d\n", + c->nr, desc_num); + return NULL; + } + + ldma_chan_desc_hw_cfg(c, desc_base, desc_num); + + c->flags |= DMA_HW_DESC; + c->desc_cnt = desc_num; + c->desc_phys = desc_base; + + ds = kzalloc(sizeof(*ds), GFP_NOWAIT); + if (!ds) + return NULL; + + tx = &ds->vdesc.tx; + dma_async_tx_descriptor_init(tx, chan); + + return tx; +} + +static int ldma_chan_reset(struct ldma_chan *c) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + unsigned long flags; + u32 val; + int ret; + + ret = ldma_chan_off(c); + if (ret) + return ret; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); + ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL); + spin_unlock_irqrestore(&d->dev_lock, flags); + + ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val, + !(val & DMA_CCTRL_RST), 0, 10000); + if (ret) + return ret; + + c->rst = 1; + c->desc_init = false; + + return 0; +} + +static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN; + u32 val; + + if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX) + val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN; + else + val = 0; + + ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); + ldma_update_bits(d, mask, val, DMA_C_BOFF); +} + +static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable, + u32 endian_type) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI; + u32 val; + + if (enable) + val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type); + else + val = 0; + + ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); + ldma_update_bits(d, 
mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
+				      u32 endian_type)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
+	u32 val;
+
+	if (enable)
+		val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
+	else
+		val = 0;
+
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 mask, val;
+
+	/* NB: when checksum is disabled, a header length must be provided */
+	if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
+		return;
+
+	mask = DMA_C_HDRM_HDR_SUM;
+	val = DMA_C_HDRM_HDR_SUM;
+
+	if (!csum && hdr_len)
+		val = hdr_len;
+
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, mask, val, DMA_C_HDRM);
+}
+
+static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 mask, val;
+
+	/* Only valid for RX channels */
+	if (ldma_chan_tx(c))
+		return;
+
+	mask = DMA_CCTRL_WR_NP_EN;
+	val = enable ? DMA_CCTRL_WR_NP_EN : 0;
+
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 mask, val;
+
+	if (d->ver < DMA_VER32 || ldma_chan_tx(c))
+		return;
+
+	mask = DMA_CCTRL_CH_ABC;
+	val = enable ? DMA_CCTRL_CH_ABC : 0;
+
+	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+	ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static int ldma_port_cfg(struct ldma_port *p)
+{
+	unsigned long flags;
+	struct ldma_dev *d;
+	u32 reg;
+
+	d = p->ldev;
+	reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
+	reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);
+
+	if (d->ver == DMA_VER22) {
+		reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
+		reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
+	} else {
+		reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);
+
+		if (p->txbl == DMA_BURSTL_32DW)
+			reg |= DMA_PCTRL_TXBL32;
+		else if (p->txbl == DMA_BURSTL_16DW)
+			reg |= DMA_PCTRL_TXBL16;
+		else
+			reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);
+
+		if (p->rxbl == DMA_BURSTL_32DW)
+			reg |= DMA_PCTRL_RXBL32;
+		else if (p->rxbl == DMA_BURSTL_16DW)
+			reg |= DMA_PCTRL_RXBL16;
+		else
+			reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
+	}
+
+	spin_lock_irqsave(&d->dev_lock, flags);
+	writel(p->portid, d->base + DMA_PS);
+	writel(reg, d->base + DMA_PCTRL);
+	spin_unlock_irqrestore(&d->dev_lock, flags);
+
+	reg = readl(d->base + DMA_PCTRL); /* read back */
+	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);
+
+	return 0;
+}
+
+static int ldma_chan_cfg(struct ldma_chan *c)
+{
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+	u32 reg;
+
+	reg = c->pden ? DMA_CCTRL_PDEN : 0;
+	reg |= c->onoff ? DMA_CCTRL_ON : 0;
+	reg |= c->rst ?
DMA_CCTRL_RST : 0; + + ldma_chan_cctrl_cfg(c, reg); + ldma_chan_irq_init(c); + + if (d->ver <= DMA_VER22) + return 0; + + spin_lock_irqsave(&d->dev_lock, flags); + ldma_chan_set_class(c, c->nr); + ldma_chan_byte_offset_cfg(c, c->boff_len); + ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian); + ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian); + ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum); + ldma_chan_rxwr_np_cfg(c, c->desc_rx_np); + ldma_chan_abc_cfg(c, c->abc_en); + spin_unlock_irqrestore(&d->dev_lock, flags); + + if (ldma_chan_is_hw_desc(c)) + ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt); + + return 0; +} + +static void ldma_dev_init(struct ldma_dev *d) +{ + unsigned long ch_mask = (unsigned long)d->channels_mask; + struct ldma_port *p; + struct ldma_chan *c; + int i; + u32 j; + + spin_lock_init(&d->dev_lock); + ldma_dev_reset(d); + ldma_dev_cfg(d); + + /* DMA port initialization */ + for (i = 0; i < d->port_nrs; i++) { + p = &d->ports[i]; + ldma_port_cfg(p); + } + + /* DMA channel initialization */ + for_each_set_bit(j, &ch_mask, d->chan_nrs) { + c = &d->chans[j]; + ldma_chan_cfg(c); + } +} + +static int ldma_cfg_init(struct ldma_dev *d) +{ + struct fwnode_handle *fwnode = dev_fwnode(d->dev); + struct ldma_port *p; + int i; + + if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en")) + d->flags |= DMA_EN_BYTE_EN; + + if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr")) + d->flags |= DMA_DBURST_WR; + + if (fwnode_property_read_bool(fwnode, "intel,dma-drb")) + d->flags |= DMA_DFT_DRB; + + if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt", + &d->pollcnt)) + d->pollcnt = DMA_DFT_POLL_CNT; + + if (d->inst->chan_fc) + d->flags |= DMA_CHAN_FLOW_CTL; + + if (d->inst->desc_fod) + d->flags |= DMA_DESC_FOD; + + if (d->inst->desc_in_sram) + d->flags |= DMA_DESC_IN_SRAM; + + if (d->inst->valid_desc_fetch_ack) + d->flags |= DMA_VALID_DESC_FETCH_ACK; + + if (d->ver > DMA_VER22) { + if (!d->port_nrs) + return -EINVAL; + + for (i = 0; i < d->port_nrs; i++) { + p = &d->ports[i]; + p->rxendi = DMA_DFT_ENDIAN; + p->txendi = DMA_DFT_ENDIAN; + p->rxbl = DMA_DFT_BURST; + p->txbl = DMA_DFT_BURST; + p->pkt_drop = DMA_PKT_DROP_DIS; + } + } + + return 0; +} + +static void dma_free_desc_resource(struct virt_dma_desc *vdesc) +{ + struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc); + struct ldma_chan *c = ds->chan; + + dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys); + kfree(ds); +} + +static struct dw2_desc_sw * +dma_alloc_desc_resource(int num, struct ldma_chan *c) +{ + struct device *dev = c->vchan.chan.device->dev; + struct dw2_desc_sw *ds; + + if (num > c->desc_num) { + dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num); + return NULL; + } + + ds = kzalloc(sizeof(*ds), GFP_NOWAIT); + if (!ds) + return NULL; + + ds->chan = c; + ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC, + &ds->desc_phys); + if (!ds->desc_hw) { + dev_dbg(dev, "out of memory for link descriptor\n"); + kfree(ds); + return NULL; + } + ds->desc_cnt = num; + + return ds; +} + +static void ldma_chan_irq_en(struct ldma_chan *c) +{ + struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); + unsigned long flags; + + spin_lock_irqsave(&d->dev_lock, flags); + writel(c->nr, d->base + DMA_CS); + writel(DMA_CI_EOP, d->base + DMA_CIE); + writel(BIT(c->nr), d->base + DMA_IRNEN); + spin_unlock_irqrestore(&d->dev_lock, flags); +} + +static void ldma_issue_pending(struct dma_chan *chan) +{ + struct ldma_chan *c = to_ldma_chan(chan); + struct ldma_dev *d = 
to_ldma_dev(c->vchan.chan.device);
+	unsigned long flags;
+
+	if (d->ver == DMA_VER22) {
+		spin_lock_irqsave(&c->vchan.lock, flags);
+		if (vchan_issue_pending(&c->vchan)) {
+			struct virt_dma_desc *vdesc;
+
+			/* Get the next descriptor */
+			vdesc = vchan_next_desc(&c->vchan);
+			if (!vdesc) {
+				c->ds = NULL;
+				spin_unlock_irqrestore(&c->vchan.lock, flags);
+				return;
+			}
+			list_del(&vdesc->node);
+			c->ds = to_lgm_dma_desc(vdesc);
+			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
+			ldma_chan_irq_en(c);
+		}
+		spin_unlock_irqrestore(&c->vchan.lock, flags);
+	}
+	ldma_chan_on(c);
+}
+
+static void ldma_synchronize(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+
+	/*
+	 * Cancel any pending work; in that case the
+	 * descriptor resource needs to be freed here.
+	 */
+	cancel_work_sync(&c->work);
+	vchan_synchronize(&c->vchan);
+	if (c->ds)
+		dma_free_desc_resource(&c->ds->vdesc);
+}
+
+static int ldma_terminate_all(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vchan.lock, flags);
+	vchan_get_all_descriptors(&c->vchan, &head);
+	spin_unlock_irqrestore(&c->vchan.lock, flags);
+	vchan_dma_desc_free_list(&c->vchan, &head);
+
+	return ldma_chan_reset(c);
+}
+
+static int ldma_resume_chan(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+
+	ldma_chan_on(c);
+
+	return 0;
+}
+
+static int ldma_pause_chan(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+
+	return ldma_chan_off(c);
+}
+
+static enum dma_status
+ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+	       struct dma_tx_state *txstate)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	enum dma_status status = DMA_COMPLETE;
+
+	if (d->ver == DMA_VER22)
+		status = dma_cookie_status(chan, cookie, txstate);
+
+	return status;
+}
+
+static void dma_chan_irq(int irq, void *data)
+{
+	struct ldma_chan *c = data;
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	u32 stat;
+
+	/* Disable channel interrupts */
+	writel(c->nr, d->base + DMA_CS);
+	stat = readl(d->base + DMA_CIS);
+	if (!stat)
+		return;
+
+	writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
+	writel(stat, d->base + DMA_CIS);
+	queue_work(d->wq, &c->work);
+}
+
+static irqreturn_t dma_interrupt(int irq, void *dev_id)
+{
+	struct ldma_dev *d = dev_id;
+	struct ldma_chan *c;
+	unsigned long irncr;
+	u32 cid;
+
+	irncr = readl(d->base + DMA_IRNCR);
+	if (!irncr) {
+		dev_err(d->dev, "dummy interrupt\n");
+		return IRQ_NONE;
+	}
+
+	for_each_set_bit(cid, &irncr, d->chan_nrs) {
+		/* Mask */
+		writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
+		/* Ack */
+		writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
+
+		c = &d->chans[cid];
+		dma_chan_irq(irq, c);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void prep_slave_burst_len(struct ldma_chan *c)
+{
+	struct ldma_port *p = c->port;
+	struct dma_slave_config *cfg = &c->config;
+
+	if (cfg->dst_maxburst)
+		cfg->src_maxburst = cfg->dst_maxburst;
+
+	/* TX and RX have the same burst length */
+	p->txbl = ilog2(cfg->src_maxburst);
+	p->rxbl = p->txbl;
+}
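prep_slave_burst_len() above folds the client's dma_slave_config into a single power-of-two exponent: a maxburst of 8 dwords, for example, becomes ilog2(8) = 3 in both txbl and rxbl. For orientation, here is a hedged sketch (not part of the patch) of the consumer-side dmaengine calls that land in this driver's device_config, prep_slave_sg and issue_pending hooks; the "rx" channel name and burst value are illustrative:

static int example_start_rx(struct device *dev, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,	/* programmed as ilog2(8) == 3 above */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* illustrative channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				     DMA_CTRL_ACK);
	if (!tx) {
		ret = -EINVAL;
		goto release;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* ends up in ldma_issue_pending() */

	return dma_submit_error(cookie);

release:
	dma_release_channel(chan);
	return ret;
}

The patch continues with the scatter-gather preparation hook itself:

+
+static struct dma_async_tx_descriptor *
+ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		   unsigned int sglen, enum dma_transfer_direction dir,
+		   unsigned long flags, void *context)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	size_t len, avail, total = 0;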
+	struct dw2_desc *hw_ds;
+	struct dw2_desc_sw *ds;
+	struct scatterlist *sg;
+	int num = sglen, i;
+	dma_addr_t addr;
+
+	if (!sgl)
+		return NULL;
+
+	if (d->ver > DMA_VER22)
+		return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = dma_alloc_desc_resource(num, c);
+	if (!ds)
+		return NULL;
+
+	c->ds = ds;
+
+	num = 0;
+	/* SOP and EOP have to be set correctly on the first and last descriptor */
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			hw_ds = &ds->desc_hw[num];
+			switch (sglen) {
+			case 1:
+				hw_ds->field &= ~DESC_SOP;
+				hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+				hw_ds->field &= ~DESC_EOP;
+				hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+				break;
+			default:
+				if (num == 0) {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+				} else if (num == (sglen - 1)) {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+				} else {
+					hw_ds->field &= ~DESC_SOP;
+					hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+
+					hw_ds->field &= ~DESC_EOP;
+					hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+				}
+				break;
+			}
+			/* Only 32-bit addresses are supported */
+			hw_ds->addr = (u32)addr;
+
+			hw_ds->field &= ~DESC_DATA_LEN;
+			hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
+
+			hw_ds->field &= ~DESC_C;
+			hw_ds->field |= FIELD_PREP(DESC_C, 0);
+
+			hw_ds->field &= ~DESC_BYTE_OFF;
+			hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
+
+			/* Ensure data ready before ownership change */
+			wmb();
+			hw_ds->field &= ~DESC_OWN;
+			hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
+
+			/* Ensure ownership changed before moving forward */
+			wmb();
+			num++;
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->size = total;
+	prep_slave_burst_len(c);
+
+	return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
+}
+
+static int
+ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+
+	memcpy(&c->config, cfg, sizeof(c->config));
+
+	return 0;
+}
+
+static int ldma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+	struct device *dev = c->vchan.chan.device->dev;
+	size_t desc_sz;
+
+	if (d->ver > DMA_VER22) {
+		c->flags |= CHAN_IN_USE;
+		return 0;
+	}
+
+	if (c->desc_pool)
+		return c->desc_num;
+
+	desc_sz = c->desc_num * sizeof(struct dw2_desc);
+	c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
+				       __alignof__(struct dw2_desc), 0);
+
+	if (!c->desc_pool) {
+		dev_err(dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
+
+	return c->desc_num;
+}
+
+static void ldma_free_chan_resources(struct dma_chan *chan)
+{
+	struct ldma_chan *c = to_ldma_chan(chan);
+	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+
+	if (d->ver == DMA_VER22) {
+		dma_pool_destroy(c->desc_pool);
+		c->desc_pool = NULL;
+		vchan_free_chan_resources(to_virt_chan(chan));
+		ldma_chan_reset(c);
+	} else {
+		c->flags &= ~CHAN_IN_USE;
+	}
+}
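dma_work() below is where a client's completion callback finally runs: the channel interrupt queues the work item, and the work item completes the cookie and invokes the callback attached to the prepared descriptor. A hedged consumer-side sketch of that contract (not part of the patch; function and variable names are illustrative):

static void example_xfer_done(void *arg)
{
	complete(arg);	/* wake up the thread waiting below */
}

static int example_submit_and_wait(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *tx)
{
	DECLARE_COMPLETION_ONSTACK(done);

	tx->callback = example_xfer_done;	/* invoked from dma_work() */
	tx->callback_param = &done;

	if (dma_submit_error(dmaengine_submit(tx)))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	return 0;
}

The patch continues with the completion work item that invokes such callbacks:

+
+static void dma_work(struct work_struct *work)
+{
+	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
+	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
+	struct virt_dma_chan *vc = &c->vchan;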
+	struct dmaengine_desc_callback cb;
+	struct virt_dma_desc *vd, *_vd;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vchan.lock, flags);
+	list_splice_tail_init(&vc->desc_completed, &head);
+	spin_unlock_irqrestore(&c->vchan.lock, flags);
+	dmaengine_desc_get_callback(tx, &cb);
+	dma_cookie_complete(tx);
+	dmaengine_desc_callback_invoke(&cb, NULL);
+
+	list_for_each_entry_safe(vd, _vd, &head, node) {
+		dmaengine_desc_get_callback(tx, &cb);
+		dma_cookie_complete(tx);
+		list_del(&vd->node);
+		dmaengine_desc_callback_invoke(&cb, NULL);
+
+		vchan_vdesc_fini(vd);
+	}
+	c->ds = NULL;
+}
+
+static void
+update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+	if (ldma_chan_tx(c))
+		p->txbl = ilog2(burst);
+	else
+		p->rxbl = ilog2(burst);
+}
+
+static void
+update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+	if (ldma_chan_tx(c))
+		p->txbl = burst;
+	else
+		p->rxbl = burst;
+}
+
+static int
+update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
+{
+	struct ldma_dev *d = ofdma->of_dma_data;
+	u32 chan_id = spec->args[0];
+	u32 port_id = spec->args[1];
+	u32 burst = spec->args[2];
+	struct ldma_port *p;
+	struct ldma_chan *c;
+
+	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
+		return 0;
+
+	p = &d->ports[port_id];
+	c = &d->chans[chan_id];
+	c->port = p;
+
+	if (d->ver == DMA_VER22)
+		update_burst_len_v22(c, p, burst);
+	else
+		update_burst_len_v3X(c, p, burst);
+
+	ldma_port_cfg(p);
+
+	return 1;
+}
+
+static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
+				   struct of_dma *ofdma)
+{
+	struct ldma_dev *d = ofdma->of_dma_data;
+	u32 chan_id = spec->args[0];
+	int ret;
+
+	if (!spec->args_count)
+		return NULL;
+
+	/* If args_count is 1, the driver uses its default settings */
+	if (spec->args_count > 1) {
+		ret = update_client_configs(ofdma, spec);
+		if (!ret)
+			return NULL;
+	}
+
+	return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
+}
+
+static void ldma_dma_init_v22(int i, struct ldma_dev *d)
+{
+	struct ldma_chan *c;
+
+	c = &d->chans[i];
+	c->nr = i; /* Real channel number */
+	c->rst = DMA_CHAN_RST;
+	c->desc_num = DMA_DFT_DESC_NUM;
+	snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
+	INIT_WORK(&c->work, dma_work);
+	c->vchan.desc_free = dma_free_desc_resource;
+	vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
+{
+	struct ldma_chan *c;
+
+	c = &d->chans[i];
+	c->data_endian = DMA_DFT_ENDIAN;
+	c->desc_endian = DMA_DFT_ENDIAN;
+	c->data_endian_en = false;
+	c->desc_endian_en = false;
+	c->desc_rx_np = false;
+	c->flags |= DEVICE_ALLOC_DESC;
+	c->onoff = DMA_CH_OFF;
+	c->rst = DMA_CHAN_RST;
+	c->abc_en = true;
+	c->hdrm_csum = false;
+	c->boff_len = 0;
+	c->nr = i;
+	c->vchan.desc_free = dma_free_desc_resource;
+	vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
+{
+	int ret;
+
+	ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
+	if (ret < 0) {
+		dev_err(d->dev, "unable to read dma-channels property\n");
+		return ret;
+	}
+
+	d->irq = platform_get_irq(pdev, 0);
+	if (d->irq < 0)
+		return d->irq;
+
+	ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
+			       DRIVER_NAME, d);
+	if (ret)
+		return ret;
+
+	d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
+					WQ_HIGHPRI);
+	if (!d->wq)
+		return -ENOMEM;
+
+	return 0;
+}
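ldma_xlate() above maps a client's DMA specifier to a channel: args[0] selects the channel, args[1] the port, and args[2] the burst length, with the last two applied through update_client_configs(). A sketch of the corresponding client wiring, written as a C comment; the values are made up for illustration:

/*
 * Illustrative device-tree consumer for ldma_xlate() (values made up):
 *
 *	dmas = <&dma0 10 2 8>;		// channel 10, port 2, burst length 8
 *	dma-names = "rx";
 *
 * A specifier with a single cell keeps the driver defaults, since
 * update_client_configs() is only called for args_count > 1.
 */

The patch continues with the clock and reset teardown helper used by probe:

+
+static void ldma_clk_disable(void *data)
+{
+	struct ldma_dev *d = data;
+
+	clk_disable_unprepare(d->core_clk);
+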
reset_control_assert(d->rst); +} + +static const struct ldma_inst_data dma0 = { + .name = "dma0", + .chan_fc = false, + .desc_fod = false, + .desc_in_sram = false, + .valid_desc_fetch_ack = false, +}; + +static const struct ldma_inst_data dma2tx = { + .name = "dma2tx", + .type = DMA_TYPE_TX, + .orrc = 16, + .chan_fc = true, + .desc_fod = true, + .desc_in_sram = true, + .valid_desc_fetch_ack = true, +}; + +static const struct ldma_inst_data dma1rx = { + .name = "dma1rx", + .type = DMA_TYPE_RX, + .orrc = 16, + .chan_fc = false, + .desc_fod = true, + .desc_in_sram = true, + .valid_desc_fetch_ack = false, +}; + +static const struct ldma_inst_data dma1tx = { + .name = "dma1tx", + .type = DMA_TYPE_TX, + .orrc = 16, + .chan_fc = true, + .desc_fod = true, + .desc_in_sram = true, + .valid_desc_fetch_ack = true, +}; + +static const struct ldma_inst_data dma0tx = { + .name = "dma0tx", + .type = DMA_TYPE_TX, + .orrc = 16, + .chan_fc = true, + .desc_fod = true, + .desc_in_sram = true, + .valid_desc_fetch_ack = true, +}; + +static const struct ldma_inst_data dma3 = { + .name = "dma3", + .type = DMA_TYPE_MCPY, + .orrc = 16, + .chan_fc = false, + .desc_fod = false, + .desc_in_sram = true, + .valid_desc_fetch_ack = false, +}; + +static const struct ldma_inst_data toe_dma30 = { + .name = "toe_dma30", + .type = DMA_TYPE_MCPY, + .orrc = 16, + .chan_fc = false, + .desc_fod = false, + .desc_in_sram = true, + .valid_desc_fetch_ack = true, +}; + +static const struct ldma_inst_data toe_dma31 = { + .name = "toe_dma31", + .type = DMA_TYPE_MCPY, + .orrc = 16, + .chan_fc = false, + .desc_fod = false, + .desc_in_sram = true, + .valid_desc_fetch_ack = true, +}; + +static const struct of_device_id intel_ldma_match[] = { + { .compatible = "intel,lgm-cdma", .data = &dma0}, + { .compatible = "intel,lgm-dma2tx", .data = &dma2tx}, + { .compatible = "intel,lgm-dma1rx", .data = &dma1rx}, + { .compatible = "intel,lgm-dma1tx", .data = &dma1tx}, + { .compatible = "intel,lgm-dma0tx", .data = &dma0tx}, + { .compatible = "intel,lgm-dma3", .data = &dma3}, + { .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30}, + { .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31}, + {} +}; + +static int intel_ldma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dma_device *dma_dev; + unsigned long ch_mask; + struct ldma_chan *c; + struct ldma_port *p; + struct ldma_dev *d; + u32 id, bitn = 32, j; + int i, ret; + + d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + /* Link controller to platform device */ + d->dev = &pdev->dev; + + d->inst = device_get_match_data(dev); + if (!d->inst) { + dev_err(dev, "No device match found\n"); + return -ENODEV; + } + + d->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(d->base)) + return PTR_ERR(d->base); + + /* Power up and reset the dma engine, some DMAs always on?? 
*/ + d->core_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(d->core_clk)) + return PTR_ERR(d->core_clk); + clk_prepare_enable(d->core_clk); + + d->rst = devm_reset_control_get_optional(dev, NULL); + if (IS_ERR(d->rst)) + return PTR_ERR(d->rst); + reset_control_deassert(d->rst); + + ret = devm_add_action_or_reset(dev, ldma_clk_disable, d); + if (ret) { + dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret); + return ret; + } + + id = readl(d->base + DMA_ID); + d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id); + d->port_nrs = FIELD_GET(DMA_ID_PNR, id); + d->ver = FIELD_GET(DMA_ID_REV, id); + + if (id & DMA_ID_AW_36B) + d->flags |= DMA_ADDR_36BIT; + + if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B)) + bitn = 36; + + if (id & DMA_ID_DW_128B) + d->flags |= DMA_DATA_128BIT; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn)); + if (ret) { + dev_err(dev, "No usable DMA configuration\n"); + return ret; + } + + if (d->ver == DMA_VER22) { + ret = ldma_init_v22(d, pdev); + if (ret) + return ret; + } + + ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask); + if (ret < 0) + d->channels_mask = GENMASK(d->chan_nrs - 1, 0); + + dma_dev = &d->dma_dev; + + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + + /* Channel initializations */ + INIT_LIST_HEAD(&dma_dev->channels); + + /* Port Initializations */ + d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL); + if (!d->ports) + return -ENOMEM; + + /* Channels Initializations */ + d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL); + if (!d->chans) + return -ENOMEM; + + for (i = 0; i < d->port_nrs; i++) { + p = &d->ports[i]; + p->portid = i; + p->ldev = d; + } + + ret = ldma_cfg_init(d); + if (ret) + return ret; + + dma_dev->dev = &pdev->dev; + + ch_mask = (unsigned long)d->channels_mask; + for_each_set_bit(j, &ch_mask, d->chan_nrs) { + if (d->ver == DMA_VER22) + ldma_dma_init_v22(j, d); + else + ldma_dma_init_v3X(j, d); + } + + dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources; + dma_dev->device_free_chan_resources = ldma_free_chan_resources; + dma_dev->device_terminate_all = ldma_terminate_all; + dma_dev->device_issue_pending = ldma_issue_pending; + dma_dev->device_tx_status = ldma_tx_status; + dma_dev->device_resume = ldma_resume_chan; + dma_dev->device_pause = ldma_pause_chan; + dma_dev->device_prep_slave_sg = ldma_prep_slave_sg; + + if (d->ver == DMA_VER22) { + dma_dev->device_config = ldma_slave_config; + dma_dev->device_synchronize = ldma_synchronize; + dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma_dev->directions = BIT(DMA_MEM_TO_DEV) | + BIT(DMA_DEV_TO_MEM); + dma_dev->residue_granularity = + DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + } + + platform_set_drvdata(pdev, d); + + ldma_dev_init(d); + + ret = dma_async_device_register(dma_dev); + if (ret) { + dev_err(dev, "Failed to register slave DMA engine device\n"); + return ret; + } + + ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d); + if (ret) { + dev_err(dev, "Failed to register of DMA controller\n"); + dma_async_device_unregister(dma_dev); + return ret; + } + + dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver, + d->port_nrs, d->chan_nrs); + + return 0; +} + +static struct platform_driver intel_ldma_driver = { + .probe = intel_ldma_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = intel_ldma_match, + }, +}; + +/* + * Perform this driver as 
device_initcall to make sure initialization happens
+ * before its DMA clients, some of which are platform-specific, and also to
+ * provide registered DMA channels and DMA capabilities to clients before
+ * their initialization.
+ */
+static int __init intel_ldma_init(void)
+{
+	return platform_driver_register(&intel_ldma_driver);
+}
+
+device_initcall(intel_ldma_init);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b84303be8edf..89f1814ff27a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,6 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of.h>
-#include <linux/dma/mmp-pdma.h>
 
 #include "dmaengine.h"
 
@@ -1148,19 +1147,6 @@ static struct platform_driver mmp_pdma_driver = {
 	.remove = mmp_pdma_remove,
 };
 
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
-	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-
-	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
-		return false;
-
-	c->drcmr = *(unsigned int *)param;
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
-
 module_platform_driver(mmp_pdma_driver);
 
 MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 9fede32641e9..1f0bbaed4643 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1080,8 +1080,9 @@ static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
 }
 
 static const struct of_device_id owl_dma_match[] = {
-	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+	{ .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
 	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, owl_dma_match);
@@ -1245,6 +1246,7 @@ static int owl_dma_remove(struct platform_device *pdev)
 	owl_dma_free(od);
 
 	clk_disable_unprepare(od->clk);
+	dma_pool_destroy(od->lli_pool);
 
 	return 0;
 }
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 88579857ca1d..c8a77b428b52 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1270,13 +1270,13 @@ static int bam_dma_probe(struct platform_device *pdev)
 		dev_err(bdev->dev, "num-ees unspecified in dt\n");
 	}
 
-	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
-	if (IS_ERR(bdev->bamclk)) {
-		if (!bdev->controlled_remotely)
-			return PTR_ERR(bdev->bamclk);
+	if (bdev->controlled_remotely)
+		bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
+	else
+		bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
 
-		bdev->bamclk = NULL;
-	}
+	if (IS_ERR(bdev->bamclk))
+		return PTR_ERR(bdev->bamclk);
 
 	ret = clk_prepare_enable(bdev->bamclk);
 	if (ret) {
@@ -1350,7 +1350,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
-	if (bdev->controlled_remotely) {
+	if (!bdev->bamclk) {
 		pm_runtime_disable(&pdev->dev);
 		return 0;
 	}
@@ -1438,10 +1438,10 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
 	struct bam_device *bdev = dev_get_drvdata(dev);
 
-	if (!bdev->controlled_remotely)
+	if (bdev->bamclk) {
 		pm_runtime_force_suspend(dev);
-
-	clk_unprepare(bdev->bamclk);
+		clk_unprepare(bdev->bamclk);
+	}
 
 	return 0;
 }
@@ -1451,12 +1451,13 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
 	struct bam_device *bdev = dev_get_drvdata(dev);
 	int ret;
 
-	ret = clk_prepare(bdev->bamclk);
-	if (ret)
-		return ret;
+	if (bdev->bamclk) {
+		ret = clk_prepare(bdev->bamclk);
+		if (ret)
+			return ret;
 
-	if
(!bdev->controlled_remotely) pm_runtime_force_resume(dev); + } return 0; } diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 1a0bf6b0567a..57f5ee4235c7 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr, gpi_write_reg(gpii, addr, val); } -static inline void +static __always_inline void gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val) { void __iomem *addr = gpii->regs + offset; @@ -1700,7 +1700,7 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc, tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE); tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT); - }; + } for (i = 0; i < tre_idx; i++) dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0], diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index a57705356e8b..d530c1bf11d9 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -189,7 +189,8 @@ struct rcar_dmac_chan { * struct rcar_dmac - R-Car Gen2 DMA Controller * @engine: base DMA engine object * @dev: the hardware device - * @iomem: remapped I/O memory base + * @dmac_base: remapped base register block + * @chan_base: remapped channel register block (optional) * @n_channels: number of available channels * @channels: array of DMAC channels * @channels_mask: bitfield of which DMA channels are managed by this driver @@ -198,7 +199,8 @@ struct rcar_dmac_chan { struct rcar_dmac { struct dma_device engine; struct device *dev; - void __iomem *iomem; + void __iomem *dmac_base; + void __iomem *chan_base; unsigned int n_channels; struct rcar_dmac_chan *channels; @@ -209,6 +211,10 @@ struct rcar_dmac { #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) +#define for_each_rcar_dmac_chan(i, dmac, chan) \ + for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \ + if (!((dmac)->channels_mask & BIT(i))) continue; else + /* * struct rcar_dmac_of_data - This driver's OF data * @chan_offset_base: DMAC channels base offset @@ -230,7 +236,7 @@ struct rcar_dmac_of_data { #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) #define RCAR_DMAOR_AE (1 << 2) #define RCAR_DMAOR_DME (1 << 0) -#define RCAR_DMACHCLR 0x0080 +#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */ #define RCAR_DMADPSEC 0x00a0 #define RCAR_DMASAR 0x0000 @@ -293,6 +299,9 @@ struct rcar_dmac_of_data { #define RCAR_DMAFIXDAR 0x0014 #define RCAR_DMAFIXDPBASE 0x0060 +/* For R-Car V3U */ +#define RCAR_V3U_DMACHCLR 0x0100 + /* Hardcode the MEMCPY transfer size to 4 bytes. 
*/ #define RCAR_DMAC_MEMCPY_XFER_SIZE 4 @@ -303,17 +312,17 @@ struct rcar_dmac_of_data { static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data) { if (reg == RCAR_DMAOR) - writew(data, dmac->iomem + reg); + writew(data, dmac->dmac_base + reg); else - writel(data, dmac->iomem + reg); + writel(data, dmac->dmac_base + reg); } static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg) { if (reg == RCAR_DMAOR) - return readw(dmac->iomem + reg); + return readw(dmac->dmac_base + reg); else - return readl(dmac->iomem + reg); + return readl(dmac->dmac_base + reg); } static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg) @@ -332,6 +341,28 @@ static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data) writel(data, chan->iomem + reg); } +static void rcar_dmac_chan_clear(struct rcar_dmac *dmac, + struct rcar_dmac_chan *chan) +{ + if (dmac->chan_base) + rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1); + else + rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index)); +} + +static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac) +{ + struct rcar_dmac_chan *chan; + unsigned int i; + + if (dmac->chan_base) { + for_each_rcar_dmac_chan(i, dmac, chan) + rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1); + } else { + rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); + } +} + /* ----------------------------------------------------------------------------- * Initialization and configuration */ @@ -447,7 +478,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac) u16 dmaor; /* Clear all channels and enable the DMAC globally. */ - rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); + rcar_dmac_chan_clear_all(dmac); rcar_dmac_write(dmac, RCAR_DMAOR, RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); @@ -817,15 +848,11 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) { + struct rcar_dmac_chan *chan; unsigned int i; /* Stop all channels. */ - for (i = 0; i < dmac->n_channels; ++i) { - struct rcar_dmac_chan *chan = &dmac->channels[i]; - - if (!(dmac->channels_mask & BIT(i))) - continue; - + for_each_rcar_dmac_chan(i, dmac, chan) { /* Stop and reinitialize the channel. */ spin_lock_irq(&chan->lock); rcar_dmac_chan_halt(chan); @@ -1566,7 +1593,7 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) * because channel is already stopped in error case. * We need to clear register and check DE bit as recovery. */ - rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index); + rcar_dmac_chan_clear(dmac, chan); rcar_dmac_chcr_de_barrier(chan); reinit = true; goto spin_lock_end; @@ -1732,9 +1759,7 @@ static const struct dev_pm_ops rcar_dmac_pm = { */ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, - struct rcar_dmac_chan *rchan, - const struct rcar_dmac_of_data *data, - unsigned int index) + struct rcar_dmac_chan *rchan) { struct platform_device *pdev = to_platform_device(dmac->dev); struct dma_chan *chan = &rchan->chan; @@ -1742,9 +1767,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, char *irqname; int ret; - rchan->index = index; - rchan->iomem = dmac->iomem + data->chan_offset_base + - data->chan_offset_stride * index; rchan->mid_rid = -EINVAL; spin_lock_init(&rchan->lock); @@ -1756,13 +1778,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, INIT_LIST_HEAD(&rchan->desc.wait); /* Request the channel interrupt. 
*/ - sprintf(pdev_irqname, "ch%u", index); + sprintf(pdev_irqname, "ch%u", rchan->index); rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); if (rchan->irq < 0) return -ENODEV; irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", - dev_name(dmac->dev), index); + dev_name(dmac->dev), rchan->index); if (!irqname) return -ENOMEM; @@ -1828,9 +1850,11 @@ static int rcar_dmac_probe(struct platform_device *pdev) DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; + const struct rcar_dmac_of_data *data; + struct rcar_dmac_chan *chan; struct dma_device *engine; + void __iomem *chan_base; struct rcar_dmac *dmac; - const struct rcar_dmac_of_data *data; unsigned int i; int ret; @@ -1868,9 +1892,24 @@ static int rcar_dmac_probe(struct platform_device *pdev) return -ENOMEM; /* Request resources. */ - dmac->iomem = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dmac->iomem)) - return PTR_ERR(dmac->iomem); + dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dmac->dmac_base)) + return PTR_ERR(dmac->dmac_base); + + if (!data->chan_offset_base) { + dmac->chan_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(dmac->chan_base)) + return PTR_ERR(dmac->chan_base); + + chan_base = dmac->chan_base; + } else { + chan_base = dmac->dmac_base + data->chan_offset_base; + } + + for_each_rcar_dmac_chan(i, dmac, chan) { + chan->index = i; + chan->iomem = chan_base + i * data->chan_offset_stride; + } /* Enable runtime PM and initialize the device. */ pm_runtime_enable(&pdev->dev); @@ -1916,11 +1955,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) INIT_LIST_HEAD(&engine->channels); - for (i = 0; i < dmac->n_channels; ++i) { - if (!(dmac->channels_mask & BIT(i))) - continue; - - ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i); + for_each_rcar_dmac_chan(i, dmac, chan) { + ret = rcar_dmac_chan_probe(dmac, chan); if (ret < 0) goto error; } @@ -1968,14 +2004,22 @@ static void rcar_dmac_shutdown(struct platform_device *pdev) } static const struct rcar_dmac_of_data rcar_dmac_data = { - .chan_offset_base = 0x8000, - .chan_offset_stride = 0x80, + .chan_offset_base = 0x8000, + .chan_offset_stride = 0x80, +}; + +static const struct rcar_dmac_of_data rcar_v3u_dmac_data = { + .chan_offset_base = 0x0, + .chan_offset_stride = 0x1000, }; static const struct of_device_id rcar_dmac_of_ids[] = { { .compatible = "renesas,rcar-dmac", .data = &rcar_dmac_data, + }, { + .compatible = "renesas,dmac-r8a779a0", + .data = &rcar_v3u_dmac_data, }, { /* Sentinel */ } }; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c deleted file mode 100644 index a5c2843384fd..000000000000 --- a/drivers/dma/sirf-dma.c +++ /dev/null @@ -1,1170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DMA controller driver for CSR SiRFprimaII - * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 
- */ - -#include <linux/module.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> -#include <linux/pm_runtime.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <linux/clk.h> -#include <linux/of_dma.h> -#include <linux/sirfsoc_dma.h> - -#include "dmaengine.h" - -#define SIRFSOC_DMA_VER_A7V1 1 -#define SIRFSOC_DMA_VER_A7V2 2 -#define SIRFSOC_DMA_VER_A6 4 - -#define SIRFSOC_DMA_DESCRIPTORS 16 -#define SIRFSOC_DMA_CHANNELS 16 -#define SIRFSOC_DMA_TABLE_NUM 256 - -#define SIRFSOC_DMA_CH_ADDR 0x00 -#define SIRFSOC_DMA_CH_XLEN 0x04 -#define SIRFSOC_DMA_CH_YLEN 0x08 -#define SIRFSOC_DMA_CH_CTRL 0x0C - -#define SIRFSOC_DMA_WIDTH_0 0x100 -#define SIRFSOC_DMA_CH_VALID 0x140 -#define SIRFSOC_DMA_CH_INT 0x144 -#define SIRFSOC_DMA_INT_EN 0x148 -#define SIRFSOC_DMA_INT_EN_CLR 0x14C -#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 -#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x154 -#define SIRFSOC_DMA_WIDTH_ATLAS7 0x10 -#define SIRFSOC_DMA_VALID_ATLAS7 0x14 -#define SIRFSOC_DMA_INT_ATLAS7 0x18 -#define SIRFSOC_DMA_INT_EN_ATLAS7 0x1c -#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7 0x20 -#define SIRFSOC_DMA_CUR_DATA_ADDR 0x34 -#define SIRFSOC_DMA_MUL_ATLAS7 0x38 -#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7 0x158 -#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7 0x15C -#define SIRFSOC_DMA_IOBG_SCMD_EN 0x800 -#define SIRFSOC_DMA_EARLY_RESP_SET 0x818 -#define SIRFSOC_DMA_EARLY_RESP_CLR 0x81C - -#define SIRFSOC_DMA_MODE_CTRL_BIT 4 -#define SIRFSOC_DMA_DIR_CTRL_BIT 5 -#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7 2 -#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7 3 -#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7 4 -#define SIRFSOC_DMA_TAB_NUM_ATLAS7 7 -#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7 5 -#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 25 -#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT 32 - -#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7 BIT(0) -#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7 BIT(1) -#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7 BIT(2) -#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7 BIT(3) -#define SIRFSOC_DMA_INT_INV_INT_ATLAS7 BIT(4) -#define SIRFSOC_DMA_INT_END_INT_ATLAS7 BIT(5) -#define SIRFSOC_DMA_INT_ALL_ATLAS7 0x3F - -/* xlen and dma_width register is in 4 bytes boundary */ -#define SIRFSOC_DMA_WORD_LEN 4 -#define SIRFSOC_DMA_XLEN_MAX_V1 0x800 -#define SIRFSOC_DMA_XLEN_MAX_V2 0x1000 - -struct sirfsoc_dma_desc { - struct dma_async_tx_descriptor desc; - struct list_head node; - - /* SiRFprimaII 2D-DMA parameters */ - - int xlen; /* DMA xlen */ - int ylen; /* DMA ylen */ - int width; /* DMA width */ - int dir; - bool cyclic; /* is loop DMA? */ - bool chain; /* is chain DMA? 
*/ - u32 addr; /* DMA buffer address */ - u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */ -}; - -struct sirfsoc_dma_chan { - struct dma_chan chan; - struct list_head free; - struct list_head prepared; - struct list_head queued; - struct list_head active; - struct list_head completed; - unsigned long happened_cyclic; - unsigned long completed_cyclic; - - /* Lock for this structure */ - spinlock_t lock; - - int mode; -}; - -struct sirfsoc_dma_regs { - u32 ctrl[SIRFSOC_DMA_CHANNELS]; - u32 interrupt_en; -}; - -struct sirfsoc_dma { - struct dma_device dma; - struct tasklet_struct tasklet; - struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; - void __iomem *base; - int irq; - struct clk *clk; - int type; - void (*exec_desc)(struct sirfsoc_dma_desc *sdesc, - int cid, int burst_mode, void __iomem *base); - struct sirfsoc_dma_regs regs_save; -}; - -struct sirfsoc_dmadata { - void (*exec)(struct sirfsoc_dma_desc *sdesc, - int cid, int burst_mode, void __iomem *base); - int type; -}; - -enum sirfsoc_dma_chain_flag { - SIRFSOC_DMA_CHAIN_NORMAL = 0x01, - SIRFSOC_DMA_CHAIN_PAUSE = 0x02, - SIRFSOC_DMA_CHAIN_LOOP = 0x03, - SIRFSOC_DMA_CHAIN_END = 0x04 -}; - -#define DRV_NAME "sirfsoc_dma" - -static int sirfsoc_dma_runtime_suspend(struct device *dev); - -/* Convert struct dma_chan to struct sirfsoc_dma_chan */ -static inline -struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) -{ - return container_of(c, struct sirfsoc_dma_chan, chan); -} - -/* Convert struct dma_chan to struct sirfsoc_dma */ -static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); - return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); -} - -static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc, - int cid, int burst_mode, void __iomem *base) -{ - if (sdesc->chain) { - /* DMA v2 HW chain mode */ - writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | - (sdesc->chain << - SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | - (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3, - base + SIRFSOC_DMA_CH_CTRL); - } else { - /* DMA v2 legacy mode */ - writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN); - writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN); - writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7); - writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)), - base + SIRFSOC_DMA_MUL_ATLAS7); - writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | - (sdesc->chain << - SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | - 0x3, base + SIRFSOC_DMA_CH_CTRL); - } - writel_relaxed(sdesc->chain ? 
SIRFSOC_DMA_INT_END_INT_ATLAS7 : - (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 | - SIRFSOC_DMA_INT_LOOP_INT_ATLAS7), - base + SIRFSOC_DMA_INT_EN_ATLAS7); - writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR); - if (sdesc->cyclic) - writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); -} - -static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc, - int cid, int burst_mode, void __iomem *base) -{ - writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN); - writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET); - writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); - writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | - (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), - base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); - writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); - writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); - writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | - (1 << cid), base + SIRFSOC_DMA_INT_EN); - writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); - if (sdesc->cyclic) { - writel((1 << cid) | 1 << (cid + 16) | - readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7), - base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); - } - -} - -static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc, - int cid, int burst_mode, void __iomem *base) -{ - writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); - writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | - (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), - base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); - writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); - writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); - writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | - (1 << cid), base + SIRFSOC_DMA_INT_EN); - writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); - if (sdesc->cyclic) { - writel((1 << cid) | 1 << (cid + 16) | - readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL), - base + SIRFSOC_DMA_CH_LOOP_CTRL); - } - -} - -/* Execute all queued DMA descriptors */ -static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) -{ - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); - int cid = schan->chan.chan_id; - struct sirfsoc_dma_desc *sdesc = NULL; - void __iomem *base; - - /* - * lock has been held by functions calling this, so we don't hold - * lock again - */ - base = sdma->base; - sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, - node); - /* Move the first queued descriptor to active list */ - list_move_tail(&sdesc->node, &schan->active); - - if (sdma->type == SIRFSOC_DMA_VER_A7V2) - cid = 0; - - /* Start the DMA transfer */ - sdma->exec_desc(sdesc, cid, schan->mode, base); - - if (sdesc->cyclic) - schan->happened_cyclic = schan->completed_cyclic = 0; -} - -/* Interrupt handler */ -static irqreturn_t sirfsoc_dma_irq(int irq, void *data) -{ - struct sirfsoc_dma *sdma = data; - struct sirfsoc_dma_chan *schan; - struct sirfsoc_dma_desc *sdesc = NULL; - u32 is; - bool chain; - int ch; - void __iomem *reg; - - switch (sdma->type) { - case SIRFSOC_DMA_VER_A6: - case SIRFSOC_DMA_VER_A7V1: - is = readl(sdma->base + SIRFSOC_DMA_CH_INT); - reg = sdma->base + SIRFSOC_DMA_CH_INT; - while ((ch = fls(is) - 1) >= 0) { - is &= ~(1 << ch); - writel_relaxed(1 << ch, reg); - schan = &sdma->channels[ch]; - spin_lock(&schan->lock); - sdesc = list_first_entry(&schan->active, - struct sirfsoc_dma_desc, node); - if (!sdesc->cyclic) { - /* Execute queued descriptors */ - 
list_splice_tail_init(&schan->active, - &schan->completed); - dma_cookie_complete(&sdesc->desc); - if (!list_empty(&schan->queued)) - sirfsoc_dma_execute(schan); - } else - schan->happened_cyclic++; - spin_unlock(&schan->lock); - } - break; - - case SIRFSOC_DMA_VER_A7V2: - is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7); - - reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7; - writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg); - schan = &sdma->channels[0]; - spin_lock(&schan->lock); - sdesc = list_first_entry(&schan->active, - struct sirfsoc_dma_desc, node); - if (!sdesc->cyclic) { - chain = sdesc->chain; - if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) || - (!chain && - (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) { - /* Execute queued descriptors */ - list_splice_tail_init(&schan->active, - &schan->completed); - dma_cookie_complete(&sdesc->desc); - if (!list_empty(&schan->queued)) - sirfsoc_dma_execute(schan); - } - } else if (sdesc->cyclic && (is & - SIRFSOC_DMA_INT_LOOP_INT_ATLAS7)) - schan->happened_cyclic++; - - spin_unlock(&schan->lock); - break; - - default: - break; - } - - /* Schedule tasklet */ - tasklet_schedule(&sdma->tasklet); - - return IRQ_HANDLED; -} - -/* process completed descriptors */ -static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) -{ - dma_cookie_t last_cookie = 0; - struct sirfsoc_dma_chan *schan; - struct sirfsoc_dma_desc *sdesc; - struct dma_async_tx_descriptor *desc; - unsigned long flags; - unsigned long happened_cyclic; - LIST_HEAD(list); - int i; - - for (i = 0; i < sdma->dma.chancnt; i++) { - schan = &sdma->channels[i]; - - /* Get all completed descriptors */ - spin_lock_irqsave(&schan->lock, flags); - if (!list_empty(&schan->completed)) { - list_splice_tail_init(&schan->completed, &list); - spin_unlock_irqrestore(&schan->lock, flags); - - /* Execute callbacks and run dependencies */ - list_for_each_entry(sdesc, &list, node) { - desc = &sdesc->desc; - - dmaengine_desc_get_callback_invoke(desc, NULL); - last_cookie = desc->cookie; - dma_run_dependencies(desc); - } - - /* Free descriptors */ - spin_lock_irqsave(&schan->lock, flags); - list_splice_tail_init(&list, &schan->free); - schan->chan.completed_cookie = last_cookie; - spin_unlock_irqrestore(&schan->lock, flags); - } else { - if (list_empty(&schan->active)) { - spin_unlock_irqrestore(&schan->lock, flags); - continue; - } - - /* for cyclic channel, desc is always in active list */ - sdesc = list_first_entry(&schan->active, - struct sirfsoc_dma_desc, node); - - /* cyclic DMA */ - happened_cyclic = schan->happened_cyclic; - spin_unlock_irqrestore(&schan->lock, flags); - - desc = &sdesc->desc; - while (happened_cyclic != schan->completed_cyclic) { - dmaengine_desc_get_callback_invoke(desc, NULL); - schan->completed_cyclic++; - } - } - } -} - -/* DMA Tasklet */ -static void sirfsoc_dma_tasklet(struct tasklet_struct *t) -{ - struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet); - - sirfsoc_dma_process_completed(sdma); -} - -/* Submit descriptor to hardware */ -static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); - struct sirfsoc_dma_desc *sdesc; - unsigned long flags; - dma_cookie_t cookie; - - sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); - - spin_lock_irqsave(&schan->lock, flags); - - /* Move descriptor to queue */ - list_move_tail(&sdesc->node, &schan->queued); - - cookie = dma_cookie_assign(txd); - - spin_unlock_irqrestore(&schan->lock, flags); - - return cookie; -} - -static 
int sirfsoc_dma_slave_config(struct dma_chan *chan, - struct dma_slave_config *config) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - unsigned long flags; - - if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || - (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) - return -EINVAL; - - spin_lock_irqsave(&schan->lock, flags); - schan->mode = (config->src_maxburst == 4 ? 1 : 0); - spin_unlock_irqrestore(&schan->lock, flags); - - return 0; -} - -static int sirfsoc_dma_terminate_all(struct dma_chan *chan) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); - int cid = schan->chan.chan_id; - unsigned long flags; - - spin_lock_irqsave(&schan->lock, flags); - - switch (sdma->type) { - case SIRFSOC_DMA_VER_A7V1: - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT); - writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + - SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); - break; - case SIRFSOC_DMA_VER_A7V2: - writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7); - writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, - sdma->base + SIRFSOC_DMA_INT_ATLAS7); - writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); - writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7); - break; - case SIRFSOC_DMA_VER_A6: - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & - ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); - writel_relaxed(readl_relaxed(sdma->base + - SIRFSOC_DMA_CH_LOOP_CTRL) & - ~((1 << cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); - break; - default: - break; - } - - list_splice_tail_init(&schan->active, &schan->free); - list_splice_tail_init(&schan->queued, &schan->free); - - spin_unlock_irqrestore(&schan->lock, flags); - - return 0; -} - -static int sirfsoc_dma_pause_chan(struct dma_chan *chan) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); - int cid = schan->chan.chan_id; - unsigned long flags; - - spin_lock_irqsave(&schan->lock, flags); - - switch (sdma->type) { - case SIRFSOC_DMA_VER_A7V1: - writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + - SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); - break; - case SIRFSOC_DMA_VER_A7V2: - writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); - break; - case SIRFSOC_DMA_VER_A6: - writel_relaxed(readl_relaxed(sdma->base + - SIRFSOC_DMA_CH_LOOP_CTRL) & - ~((1 << cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - break; - - default: - break; - } - - spin_unlock_irqrestore(&schan->lock, flags); - - return 0; -} - -static int sirfsoc_dma_resume_chan(struct dma_chan *chan) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); - int cid = schan->chan.chan_id; - unsigned long flags; - - spin_lock_irqsave(&schan->lock, flags); - switch (sdma->type) { - case SIRFSOC_DMA_VER_A7V1: - writel_relaxed((1 << cid) | 1 << (cid + 16), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); - break; - case SIRFSOC_DMA_VER_A7V2: - writel_relaxed(0x10001, - sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); - break; - case SIRFSOC_DMA_VER_A6: - writel_relaxed(readl_relaxed(sdma->base + - SIRFSOC_DMA_CH_LOOP_CTRL) | - ((1 << 
cid) | 1 << (cid + 16)), - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); - break; - - default: - break; - } - - spin_unlock_irqrestore(&schan->lock, flags); - - return 0; -} - -/* Alloc channel resources */ -static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) -{ - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma_desc *sdesc; - unsigned long flags; - LIST_HEAD(descs); - int i; - - pm_runtime_get_sync(sdma->dma.dev); - - /* Alloc descriptors for this channel */ - for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { - sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); - if (!sdesc) { - dev_notice(sdma->dma.dev, "Memory allocation error. " - "Allocated only %u descriptors\n", i); - break; - } - - dma_async_tx_descriptor_init(&sdesc->desc, chan); - sdesc->desc.flags = DMA_CTRL_ACK; - sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; - - list_add_tail(&sdesc->node, &descs); - } - - /* Return error only if no descriptors were allocated */ - if (i == 0) - return -ENOMEM; - - spin_lock_irqsave(&schan->lock, flags); - - list_splice_tail_init(&descs, &schan->free); - spin_unlock_irqrestore(&schan->lock, flags); - - return i; -} - -/* Free channel resources */ -static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); - struct sirfsoc_dma_desc *sdesc, *tmp; - unsigned long flags; - LIST_HEAD(descs); - - spin_lock_irqsave(&schan->lock, flags); - - /* Channel must be idle */ - BUG_ON(!list_empty(&schan->prepared)); - BUG_ON(!list_empty(&schan->queued)); - BUG_ON(!list_empty(&schan->active)); - BUG_ON(!list_empty(&schan->completed)); - - /* Move data */ - list_splice_tail_init(&schan->free, &descs); - - spin_unlock_irqrestore(&schan->lock, flags); - - /* Free descriptors */ - list_for_each_entry_safe(sdesc, tmp, &descs, node) - kfree(sdesc); - - pm_runtime_put(sdma->dma.dev); -} - -/* Send pending descriptor to hardware */ -static void sirfsoc_dma_issue_pending(struct dma_chan *chan) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - unsigned long flags; - - spin_lock_irqsave(&schan->lock, flags); - - if (list_empty(&schan->active) && !list_empty(&schan->queued)) - sirfsoc_dma_execute(schan); - - spin_unlock_irqrestore(&schan->lock, flags); -} - -/* Check request completion status */ -static enum dma_status -sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - unsigned long flags; - enum dma_status ret; - struct sirfsoc_dma_desc *sdesc; - int cid = schan->chan.chan_id; - unsigned long dma_pos; - unsigned long dma_request_bytes; - unsigned long residue; - - spin_lock_irqsave(&schan->lock, flags); - - if (list_empty(&schan->active)) { - ret = dma_cookie_status(chan, cookie, txstate); - dma_set_residue(txstate, 0); - spin_unlock_irqrestore(&schan->lock, flags); - return ret; - } - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node); - if (sdesc->cyclic) - dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * - (sdesc->width * SIRFSOC_DMA_WORD_LEN); - else - dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN; - - ret = dma_cookie_status(chan, cookie, txstate); - - if (sdma->type == SIRFSOC_DMA_VER_A7V2) - cid = 0; - - if (sdma->type == 
SIRFSOC_DMA_VER_A7V2) { - dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR); - } else { - dma_pos = readl_relaxed( - sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2; - } - - residue = dma_request_bytes - (dma_pos - sdesc->addr); - dma_set_residue(txstate, residue); - - spin_unlock_irqrestore(&schan->lock, flags); - - return ret; -} - -static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( - struct dma_chan *chan, struct dma_interleaved_template *xt, - unsigned long flags) -{ - struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma_desc *sdesc = NULL; - unsigned long iflags; - int ret; - - if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) { - ret = -EINVAL; - goto err_dir; - } - - /* Get free descriptor */ - spin_lock_irqsave(&schan->lock, iflags); - if (!list_empty(&schan->free)) { - sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, - node); - list_del(&sdesc->node); - } - spin_unlock_irqrestore(&schan->lock, iflags); - - if (!sdesc) { - /* try to free completed descriptors */ - sirfsoc_dma_process_completed(sdma); - ret = 0; - goto no_desc; - } - - /* Place descriptor in prepared list */ - spin_lock_irqsave(&schan->lock, iflags); - - /* - * Number of chunks in a frame can only be 1 for prima2 - * and ylen (number of frame - 1) must be at least 0 - */ - if ((xt->frame_size == 1) && (xt->numf > 0)) { - sdesc->cyclic = 0; - sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; - sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / - SIRFSOC_DMA_WORD_LEN; - sdesc->ylen = xt->numf - 1; - if (xt->dir == DMA_MEM_TO_DEV) { - sdesc->addr = xt->src_start; - sdesc->dir = 1; - } else { - sdesc->addr = xt->dst_start; - sdesc->dir = 0; - } - - list_add_tail(&sdesc->node, &schan->prepared); - } else { - pr_err("sirfsoc DMA Invalid xfer\n"); - ret = -EINVAL; - goto err_xfer; - } - spin_unlock_irqrestore(&schan->lock, iflags); - - return &sdesc->desc; -err_xfer: - spin_unlock_irqrestore(&schan->lock, iflags); -no_desc: -err_dir: - return ERR_PTR(ret); -} - -static struct dma_async_tx_descriptor * -sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, - size_t buf_len, size_t period_len, - enum dma_transfer_direction direction, unsigned long flags) -{ - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - struct sirfsoc_dma_desc *sdesc = NULL; - unsigned long iflags; - - /* - * we only support cycle transfer with 2 period - * If the X-length is set to 0, it would be the loop mode. - * The DMA address keeps increasing until reaching the end of a loop - * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then - * the DMA address goes back to the beginning of this area. - * In loop mode, the DMA data region is divided into two parts, BUFA - * and BUFB. 
DMA controller generates interrupts twice in each loop: - * when the DMA address reaches the end of BUFA or the end of the - * BUFB - */ - if (buf_len != 2 * period_len) - return ERR_PTR(-EINVAL); - - /* Get free descriptor */ - spin_lock_irqsave(&schan->lock, iflags); - if (!list_empty(&schan->free)) { - sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, - node); - list_del(&sdesc->node); - } - spin_unlock_irqrestore(&schan->lock, iflags); - - if (!sdesc) - return NULL; - - /* Place descriptor in prepared list */ - spin_lock_irqsave(&schan->lock, iflags); - sdesc->addr = addr; - sdesc->cyclic = 1; - sdesc->xlen = 0; - sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; - sdesc->width = 1; - list_add_tail(&sdesc->node, &schan->prepared); - spin_unlock_irqrestore(&schan->lock, iflags); - - return &sdesc->desc; -} - -/* - * The DMA controller consists of 16 independent DMA channels. - * Each channel is allocated to a different function - */ -bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) -{ - unsigned int ch_nr = (unsigned int) chan_id; - - if (ch_nr == chan->chan_id + - chan->device->dev_id * SIRFSOC_DMA_CHANNELS) - return true; - - return false; -} -EXPORT_SYMBOL(sirfsoc_dma_filter_id); - -#define SIRFSOC_DMA_BUSWIDTHS \ - (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ - BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) - -static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct sirfsoc_dma *sdma = ofdma->of_dma_data; - unsigned int request = dma_spec->args[0]; - - if (request >= SIRFSOC_DMA_CHANNELS) - return NULL; - - return dma_get_slave_channel(&sdma->channels[request].chan); -} - -static int sirfsoc_dma_probe(struct platform_device *op) -{ - struct device_node *dn = op->dev.of_node; - struct device *dev = &op->dev; - struct dma_device *dma; - struct sirfsoc_dma *sdma; - struct sirfsoc_dma_chan *schan; - struct sirfsoc_dmadata *data; - struct resource res; - ulong regs_start, regs_size; - u32 id; - int ret, i; - - sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); - if (!sdma) - return -ENOMEM; - - data = (struct sirfsoc_dmadata *) - (of_match_device(op->dev.driver->of_match_table, - &op->dev)->data); - sdma->exec_desc = data->exec; - sdma->type = data->type; - - if (of_property_read_u32(dn, "cell-index", &id)) { - dev_err(dev, "Fail to get DMAC index\n"); - return -ENODEV; - } - - sdma->irq = irq_of_parse_and_map(dn, 0); - if (!sdma->irq) { - dev_err(dev, "Error mapping IRQ!\n"); - return -EINVAL; - } - - sdma->clk = devm_clk_get(dev, NULL); - if (IS_ERR(sdma->clk)) { - dev_err(dev, "failed to get a clock.\n"); - return PTR_ERR(sdma->clk); - } - - ret = of_address_to_resource(dn, 0, &res); - if (ret) { - dev_err(dev, "Error parsing memory region!\n"); - goto irq_dispose; - } - - regs_start = res.start; - regs_size = resource_size(&res); - - sdma->base = devm_ioremap(dev, regs_start, regs_size); - if (!sdma->base) { - dev_err(dev, "Error mapping memory region!\n"); - ret = -ENOMEM; - goto irq_dispose; - } - - ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma); - if (ret) { - dev_err(dev, "Error requesting IRQ!\n"); - ret = -EINVAL; - goto irq_dispose; - } - - dma = &sdma->dma; - dma->dev = dev; - - dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; - dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; - dma->device_issue_pending = sirfsoc_dma_issue_pending; - 
dma->device_config = sirfsoc_dma_slave_config; - dma->device_pause = sirfsoc_dma_pause_chan; - dma->device_resume = sirfsoc_dma_resume_chan; - dma->device_terminate_all = sirfsoc_dma_terminate_all; - dma->device_tx_status = sirfsoc_dma_tx_status; - dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; - dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; - dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; - dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS; - dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - - INIT_LIST_HEAD(&dma->channels); - dma_cap_set(DMA_SLAVE, dma->cap_mask); - dma_cap_set(DMA_CYCLIC, dma->cap_mask); - dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); - dma_cap_set(DMA_PRIVATE, dma->cap_mask); - - for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) { - schan = &sdma->channels[i]; - - schan->chan.device = dma; - dma_cookie_init(&schan->chan); - - INIT_LIST_HEAD(&schan->free); - INIT_LIST_HEAD(&schan->prepared); - INIT_LIST_HEAD(&schan->queued); - INIT_LIST_HEAD(&schan->active); - INIT_LIST_HEAD(&schan->completed); - - spin_lock_init(&schan->lock); - list_add_tail(&schan->chan.device_node, &dma->channels); - } - - tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet); - - /* Register DMA engine */ - dev_set_drvdata(dev, sdma); - - ret = dma_async_device_register(dma); - if (ret) - goto free_irq; - - /* Device-tree DMA controller registration */ - ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma); - if (ret) { - dev_err(dev, "failed to register DMA controller\n"); - goto unreg_dma_dev; - } - - pm_runtime_enable(&op->dev); - dev_info(dev, "initialized SIRFSOC DMAC driver\n"); - - return 0; - -unreg_dma_dev: - dma_async_device_unregister(dma); -free_irq: - free_irq(sdma->irq, sdma); -irq_dispose: - irq_dispose_mapping(sdma->irq); - return ret; -} - -static int sirfsoc_dma_remove(struct platform_device *op) -{ - struct device *dev = &op->dev; - struct sirfsoc_dma *sdma = dev_get_drvdata(dev); - - of_dma_controller_free(op->dev.of_node); - dma_async_device_unregister(&sdma->dma); - free_irq(sdma->irq, sdma); - tasklet_kill(&sdma->tasklet); - irq_dispose_mapping(sdma->irq); - pm_runtime_disable(&op->dev); - if (!pm_runtime_status_suspended(&op->dev)) - sirfsoc_dma_runtime_suspend(&op->dev); - - return 0; -} - -static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev) -{ - struct sirfsoc_dma *sdma = dev_get_drvdata(dev); - - clk_disable_unprepare(sdma->clk); - return 0; -} - -static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev) -{ - struct sirfsoc_dma *sdma = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare_enable(sdma->clk); - if (ret < 0) { - dev_err(dev, "clk_enable failed: %d\n", ret); - return ret; - } - return 0; -} - -static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev) -{ - struct sirfsoc_dma *sdma = dev_get_drvdata(dev); - struct sirfsoc_dma_regs *save = &sdma->regs_save; - struct sirfsoc_dma_chan *schan; - int ch; - int ret; - int count; - u32 int_offset; - - /* - * if we were runtime-suspended before, resume to enable clock - * before accessing register - */ - if (pm_runtime_status_suspended(dev)) { - ret = sirfsoc_dma_runtime_resume(dev); - if (ret < 0) - return ret; - } - - if (sdma->type == SIRFSOC_DMA_VER_A7V2) { - count = 1; - int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; - } else { - count = SIRFSOC_DMA_CHANNELS; - int_offset = SIRFSOC_DMA_INT_EN; - } - - /* - * DMA controller will lose all registers while suspending - * so we need to save registers for active channels - */ - for (ch = 0; ch < 
count; ch++) { - schan = &sdma->channels[ch]; - if (list_empty(&schan->active)) - continue; - save->ctrl[ch] = readl_relaxed(sdma->base + - ch * 0x10 + SIRFSOC_DMA_CH_CTRL); - } - save->interrupt_en = readl_relaxed(sdma->base + int_offset); - - /* Disable clock */ - sirfsoc_dma_runtime_suspend(dev); - - return 0; -} - -static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev) -{ - struct sirfsoc_dma *sdma = dev_get_drvdata(dev); - struct sirfsoc_dma_regs *save = &sdma->regs_save; - struct sirfsoc_dma_desc *sdesc; - struct sirfsoc_dma_chan *schan; - int ch; - int ret; - int count; - u32 int_offset; - u32 width_offset; - - /* Enable clock before accessing register */ - ret = sirfsoc_dma_runtime_resume(dev); - if (ret < 0) - return ret; - - if (sdma->type == SIRFSOC_DMA_VER_A7V2) { - count = 1; - int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; - width_offset = SIRFSOC_DMA_WIDTH_ATLAS7; - } else { - count = SIRFSOC_DMA_CHANNELS; - int_offset = SIRFSOC_DMA_INT_EN; - width_offset = SIRFSOC_DMA_WIDTH_0; - } - - writel_relaxed(save->interrupt_en, sdma->base + int_offset); - for (ch = 0; ch < count; ch++) { - schan = &sdma->channels[ch]; - if (list_empty(&schan->active)) - continue; - sdesc = list_first_entry(&schan->active, - struct sirfsoc_dma_desc, - node); - writel_relaxed(sdesc->width, - sdma->base + width_offset + ch * 4); - writel_relaxed(sdesc->xlen, - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN); - writel_relaxed(sdesc->ylen, - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN); - writel_relaxed(save->ctrl[ch], - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); - if (sdma->type == SIRFSOC_DMA_VER_A7V2) { - writel_relaxed(sdesc->addr, - sdma->base + SIRFSOC_DMA_CH_ADDR); - } else { - writel_relaxed(sdesc->addr >> 2, - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); - - } - } - - /* if we were runtime-suspended before, suspend again */ - if (pm_runtime_status_suspended(dev)) - sirfsoc_dma_runtime_suspend(dev); - - return 0; -} - -static const struct dev_pm_ops sirfsoc_dma_pm_ops = { - SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) -}; - -static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { - .exec = sirfsoc_dma_execute_hw_a6, - .type = SIRFSOC_DMA_VER_A6, -}; - -static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { - .exec = sirfsoc_dma_execute_hw_a7v1, - .type = SIRFSOC_DMA_VER_A7V1, -}; - -static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { - .exec = sirfsoc_dma_execute_hw_a7v2, - .type = SIRFSOC_DMA_VER_A7V2, -}; - -static const struct of_device_id sirfsoc_dma_match[] = { - { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,}, - { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,}, - { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, - {}, -}; -MODULE_DEVICE_TABLE(of, sirfsoc_dma_match); - -static struct platform_driver sirfsoc_dma_driver = { - .probe = sirfsoc_dma_probe, - .remove = sirfsoc_dma_remove, - .driver = { - .name = DRV_NAME, - .pm = &sirfsoc_dma_pm_ops, - .of_match_table = sirfsoc_dma_match, - }, -}; - -static __init int sirfsoc_dma_init(void) -{ - return platform_driver_register(&sirfsoc_dma_driver); -} - -static void __exit sirfsoc_dma_exit(void) -{ - platform_driver_unregister(&sirfsoc_dma_driver); -} - -subsys_initcall(sirfsoc_dma_init); -module_exit(sirfsoc_dma_exit); - -MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>"); -MODULE_AUTHOR("Barry Song <baohua.song@csr.com>"); -MODULE_DESCRIPTION("SIRFSOC DMA control 
driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4256e55bbf25..265d7c07b348 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -78,7 +78,7 @@ static int dma40_memcpy_channels[] = { DB8500_DMA_MEMCPY_EV_5, }; -/* Default configuration for physcial memcpy */ +/* Default configuration for physical memcpy */ static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = { .mode = STEDMA40_MODE_PHYSICAL, .dir = DMA_MEM_TO_MEM, diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index f474a1232335..96ad21869ba7 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -121,6 +121,11 @@ struct udma_oes_offsets { #define UDMA_FLAG_PDMA_ACC32 BIT(0) #define UDMA_FLAG_PDMA_BURST BIT(1) #define UDMA_FLAG_TDTYPE BIT(2) +#define UDMA_FLAG_BURST_SIZE BIT(3) +#define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ + UDMA_FLAG_PDMA_BURST | \ + UDMA_FLAG_TDTYPE | \ + UDMA_FLAG_BURST_SIZE) struct udma_match_data { enum k3_dma_type type; @@ -128,6 +133,7 @@ struct udma_match_data { bool enable_memcpy_support; u32 flags; u32 statictr_z_mask; + u8 burst_size[3]; }; struct udma_soc_data { @@ -436,6 +442,18 @@ static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) } } +static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) +{ + int i; + + for (i = 0; i < tpl_map->levels; i++) { + if (chan_id >= tpl_map->start_idx[i]) + return i; + } + + return 0; +} + static void udma_reset_uchan(struct udma_chan *uc) { memset(&uc->config, 0, sizeof(uc->config)); @@ -1811,13 +1829,21 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_tchan *tchan = uc->tchan; struct udma_rchan *rchan = uc->rchan; - int ret = 0; + u8 burst_size = 0; + int ret; + u8 tpl; /* Non synchronized - mem to mem type of transfer */ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { + tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); + + burst_size = ud->match_data->burst_size[tpl]; + } + req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.index = tchan->id; @@ -1825,6 +1851,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc) req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; req_tx.txcq_qnum = tc_ring; req_tx.tx_atype = ud->atype; + if (burst_size) { + req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; + req_tx.tx_burst_size = burst_size; + } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) { @@ -1839,6 +1869,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc) req_rx.rxcq_qnum = tc_ring; req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; req_rx.rx_atype = ud->atype; + if (burst_size) { + req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; + req_rx.rx_burst_size = burst_size; + } ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); if (ret) @@ -1854,12 +1888,24 @@ static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; struct udma_bchan *bchan = uc->bchan; - int ret = 0; + u8 burst_size = 0; + int ret; + u8 tpl; + + if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { + 
tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); + + burst_size = ud->match_data->burst_size[tpl]; + } req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; req_tx.index = bchan->id; + if (burst_size) { + req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; + req_tx.tx_burst_size = burst_size; + } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) @@ -1877,7 +1923,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc) int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; u32 mode, fetch_size; - int ret = 0; + int ret; if (uc->config.pkt_mode) { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; @@ -1918,7 +1964,7 @@ static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_tchan *tchan = uc->tchan; struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; - int ret = 0; + int ret; req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; @@ -1951,7 +1997,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc) struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; u32 mode, fetch_size; - int ret = 0; + int ret; if (uc->config.pkt_mode) { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; @@ -2028,7 +2074,7 @@ static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_rchan *rchan = uc->rchan; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; - int ret = 0; + int ret; req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; @@ -2048,7 +2094,7 @@ static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; - int ret = 0; + int ret; req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; @@ -4168,6 +4214,11 @@ static struct udma_match_data am654_main_data = { .psil_base = 0x1000, .enable_memcpy_support = true, .statictr_z_mask = GENMASK(11, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ + 0, /* No UH Channels */ + }, }; static struct udma_match_data am654_mcu_data = { @@ -4175,38 +4226,63 @@ static struct udma_match_data am654_mcu_data = { .psil_base = 0x6000, .enable_memcpy_support = false, .statictr_z_mask = GENMASK(11, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ + 0, /* No UH Channels */ + }, }; static struct udma_match_data j721e_main_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x1000, .enable_memcpy_support = true, - .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE, + .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ + }, }; static struct udma_match_data j721e_mcu_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x6000, 
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ - .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE, + .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ + 0, /* No UH Channels */ + }, }; static struct udma_match_data am64_bcdma_data = { .type = DMA_TYPE_BCDMA, .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ .enable_memcpy_support = true, /* Supported via bchan */ - .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE, + .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + 0, /* No H Channels */ + 0, /* No UH Channels */ + }, }; static struct udma_match_data am64_pktdma_data = { .type = DMA_TYPE_PKTDMA, .psil_base = 0x1000, .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ - .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE, + .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + 0, /* No H Channels */ + 0, /* No UH Channels */ + }, }; static const struct of_device_id udma_of_match[] = { @@ -4306,6 +4382,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); + ud->rflow_cnt = ud->rchan_cnt; break; case DMA_TYPE_PKTDMA: cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); @@ -5046,6 +5123,34 @@ static void udma_dbg_summary_show(struct seq_file *s, } #endif /* CONFIG_DEBUG_FS */ +static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) +{ + const struct udma_match_data *match_data = ud->match_data; + u8 tpl; + + if (!match_data->enable_memcpy_support) + return DMAENGINE_ALIGN_8_BYTES; + + /* Get the highest TPL level the device supports for memcpy */ + if (ud->bchan_cnt) + tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); + else if (ud->tchan_cnt) + tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); + else + return DMAENGINE_ALIGN_8_BYTES; + + switch (match_data->burst_size[tpl]) { + case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: + return DMAENGINE_ALIGN_256_BYTES; + case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: + return DMAENGINE_ALIGN_128_BYTES; + case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: + fallthrough; + default: + return DMAENGINE_ALIGN_64_BYTES; + } +} + #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ @@ -5202,7 +5307,6 @@ static int udma_probe(struct platform_device *pdev) ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | DESC_METADATA_ENGINE; if (ud->match_data->enable_memcpy_support && @@ -5284,6 +5388,9 @@ static int udma_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); } + /* Configure the copy_align to the maximum burst size the device supports */ + ud->ddev.copy_align = udma_get_copy_align(ud); + ret = dma_async_device_register(&ud->ddev); if (ret) { dev_err(dev, "failed to 
register slave DMA engine: %d\n", ret); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 79777550a6ff..3aded7861fef 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -800,7 +800,7 @@ xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) { struct xilinx_dma_tx_descriptor *desc; - desc = kzalloc(sizeof(*desc), GFP_KERNEL); + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); if (!desc) return NULL; diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c deleted file mode 100644 index b057582b2fac..000000000000 --- a/drivers/dma/zx_dma.c +++ /dev/null @@ -1,941 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2015 Linaro. - */ -#include <linux/sched.h> -#include <linux/device.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> -#include <linux/dmapool.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/of_device.h> -#include <linux/of.h> -#include <linux/clk.h> -#include <linux/of_dma.h> - -#include "virt-dma.h" - -#define DRIVER_NAME "zx-dma" -#define DMA_ALIGN 4 -#define DMA_MAX_SIZE (0x10000 - 512) -#define LLI_BLOCK_SIZE (4 * PAGE_SIZE) - -#define REG_ZX_SRC_ADDR 0x00 -#define REG_ZX_DST_ADDR 0x04 -#define REG_ZX_TX_X_COUNT 0x08 -#define REG_ZX_TX_ZY_COUNT 0x0c -#define REG_ZX_SRC_ZY_STEP 0x10 -#define REG_ZX_DST_ZY_STEP 0x14 -#define REG_ZX_LLI_ADDR 0x1c -#define REG_ZX_CTRL 0x20 -#define REG_ZX_TC_IRQ 0x800 -#define REG_ZX_SRC_ERR_IRQ 0x804 -#define REG_ZX_DST_ERR_IRQ 0x808 -#define REG_ZX_CFG_ERR_IRQ 0x80c -#define REG_ZX_TC_IRQ_RAW 0x810 -#define REG_ZX_SRC_ERR_IRQ_RAW 0x814 -#define REG_ZX_DST_ERR_IRQ_RAW 0x818 -#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c -#define REG_ZX_STATUS 0x820 -#define REG_ZX_DMA_GRP_PRIO 0x824 -#define REG_ZX_DMA_ARB 0x828 - -#define ZX_FORCE_CLOSE BIT(31) -#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13) -#define ZX_MAX_BURST_LEN 16 -#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9) -#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6) -#define ZX_IRQ_ENABLE_ALL (3 << 4) -#define ZX_DST_FIFO_MODE BIT(3) -#define ZX_SRC_FIFO_MODE BIT(2) -#define ZX_SOFT_REQ BIT(1) -#define ZX_CH_ENABLE BIT(0) - -#define ZX_DMA_BUSWIDTHS \ - (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ - BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) - -enum zx_dma_burst_width { - ZX_DMA_WIDTH_8BIT = 0, - ZX_DMA_WIDTH_16BIT = 1, - ZX_DMA_WIDTH_32BIT = 2, - ZX_DMA_WIDTH_64BIT = 3, -}; - -struct zx_desc_hw { - u32 saddr; - u32 daddr; - u32 src_x; - u32 src_zy; - u32 src_zy_step; - u32 dst_zy_step; - u32 reserved1; - u32 lli; - u32 ctr; - u32 reserved[7]; /* pack as hardware registers region size */ -} __aligned(32); - -struct zx_dma_desc_sw { - struct virt_dma_desc vd; - dma_addr_t desc_hw_lli; - size_t desc_num; - size_t size; - struct zx_desc_hw *desc_hw; -}; - -struct zx_dma_phy; - -struct zx_dma_chan { - struct dma_slave_config slave_cfg; - int id; /* Request phy chan id */ - u32 ccfg; - u32 cyclic; - struct virt_dma_chan vc; - struct zx_dma_phy *phy; - struct list_head node; - dma_addr_t dev_addr; - enum dma_status status; -}; - -struct zx_dma_phy { - u32 idx; - void __iomem *base; - struct zx_dma_chan *vchan; - struct zx_dma_desc_sw *ds_run; - struct zx_dma_desc_sw *ds_done; -}; - -struct zx_dma_dev { - struct dma_device slave; - void __iomem 
*base; - spinlock_t lock; /* lock for ch and phy */ - struct list_head chan_pending; - struct zx_dma_phy *phy; - struct zx_dma_chan *chans; - struct clk *clk; - struct dma_pool *pool; - u32 dma_channels; - u32 dma_requests; - int irq; -}; - -#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave) - -static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan) -{ - return container_of(chan, struct zx_dma_chan, vc.chan); -} - -static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d) -{ - u32 val = 0; - - val = readl_relaxed(phy->base + REG_ZX_CTRL); - val &= ~ZX_CH_ENABLE; - val |= ZX_FORCE_CLOSE; - writel_relaxed(val, phy->base + REG_ZX_CTRL); - - val = 0x1 << phy->idx; - writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW); - writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW); - writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW); - writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW); -} - -static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw) -{ - writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR); - writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR); - writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT); - writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT); - writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP); - writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP); - writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR); - writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL); -} - -static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy) -{ - return readl_relaxed(phy->base + REG_ZX_LLI_ADDR); -} - -static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d) -{ - return readl_relaxed(d->base + REG_ZX_STATUS); -} - -static void zx_dma_init_state(struct zx_dma_dev *d) -{ - /* set same priority */ - writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB); - /* clear all irq */ - writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW); - writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW); - writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW); - writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW); -} - -static int zx_dma_start_txd(struct zx_dma_chan *c) -{ - struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device); - struct virt_dma_desc *vd = vchan_next_desc(&c->vc); - - if (!c->phy) - return -EAGAIN; - - if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d)) - return -EAGAIN; - - if (vd) { - struct zx_dma_desc_sw *ds = - container_of(vd, struct zx_dma_desc_sw, vd); - /* - * fetch and remove request from vc->desc_issued - * so vc->desc_issued only contains desc pending - */ - list_del(&ds->vd.node); - c->phy->ds_run = ds; - c->phy->ds_done = NULL; - /* start dma */ - zx_dma_set_desc(c->phy, ds->desc_hw); - return 0; - } - c->phy->ds_done = NULL; - c->phy->ds_run = NULL; - return -EAGAIN; -} - -static void zx_dma_task(struct zx_dma_dev *d) -{ - struct zx_dma_phy *p; - struct zx_dma_chan *c, *cn; - unsigned pch, pch_alloc = 0; - unsigned long flags; - - /* check new dma request of running channel in vc->desc_issued */ - list_for_each_entry_safe(c, cn, &d->slave.channels, - vc.chan.device_node) { - spin_lock_irqsave(&c->vc.lock, flags); - p = c->phy; - if (p && p->ds_done && zx_dma_start_txd(c)) { - /* No current txd associated with this channel */ - dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); - /* Mark this channel free */ - c->phy = NULL; - p->vchan = NULL; - } - spin_unlock_irqrestore(&c->vc.lock, flags); - } - - /* check new channel request in d->chan_pending */ - spin_lock_irqsave(&d->lock, flags); - while 
(!list_empty(&d->chan_pending)) { - c = list_first_entry(&d->chan_pending, - struct zx_dma_chan, node); - p = &d->phy[c->id]; - if (!p->vchan) { - /* remove from d->chan_pending */ - list_del_init(&c->node); - pch_alloc |= 1 << c->id; - /* Mark this channel allocated */ - p->vchan = c; - c->phy = p; - } else { - dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id); - } - } - spin_unlock_irqrestore(&d->lock, flags); - - for (pch = 0; pch < d->dma_channels; pch++) { - if (pch_alloc & (1 << pch)) { - p = &d->phy[pch]; - c = p->vchan; - if (c) { - spin_lock_irqsave(&c->vc.lock, flags); - zx_dma_start_txd(c); - spin_unlock_irqrestore(&c->vc.lock, flags); - } - } - } -} - -static irqreturn_t zx_dma_int_handler(int irq, void *dev_id) -{ - struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id; - struct zx_dma_phy *p; - struct zx_dma_chan *c; - u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ); - u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ); - u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ); - u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ); - u32 i, irq_chan = 0, task = 0; - - while (tc) { - i = __ffs(tc); - tc &= ~BIT(i); - p = &d->phy[i]; - c = p->vchan; - if (c) { - spin_lock(&c->vc.lock); - if (c->cyclic) { - vchan_cyclic_callback(&p->ds_run->vd); - } else { - vchan_cookie_complete(&p->ds_run->vd); - p->ds_done = p->ds_run; - task = 1; - } - spin_unlock(&c->vc.lock); - irq_chan |= BIT(i); - } - } - - if (serr || derr || cfg) - dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n", - serr, derr, cfg); - - writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW); - writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW); - writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW); - writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW); - - if (task) - zx_dma_task(d); - return IRQ_HANDLED; -} - -static void zx_dma_free_chan_resources(struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_dev *d = to_zx_dma(chan->device); - unsigned long flags; - - spin_lock_irqsave(&d->lock, flags); - list_del_init(&c->node); - spin_unlock_irqrestore(&d->lock, flags); - - vchan_free_chan_resources(&c->vc); - c->ccfg = 0; -} - -static enum dma_status zx_dma_tx_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *state) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_phy *p; - struct virt_dma_desc *vd; - unsigned long flags; - enum dma_status ret; - size_t bytes = 0; - - ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_COMPLETE || !state) - return ret; - - spin_lock_irqsave(&c->vc.lock, flags); - p = c->phy; - ret = c->status; - - /* - * If the cookie is on our issue queue, then the residue is - * its total size. 
- */ - vd = vchan_find_desc(&c->vc, cookie); - if (vd) { - bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size; - } else if ((!p) || (!p->ds_run)) { - bytes = 0; - } else { - struct zx_dma_desc_sw *ds = p->ds_run; - u32 clli = 0, index = 0; - - bytes = 0; - clli = zx_dma_get_curr_lli(p); - index = (clli - ds->desc_hw_lli) / - sizeof(struct zx_desc_hw) + 1; - for (; index < ds->desc_num; index++) { - bytes += ds->desc_hw[index].src_x; - /* end of lli */ - if (!ds->desc_hw[index].lli) - break; - } - } - spin_unlock_irqrestore(&c->vc.lock, flags); - dma_set_residue(state, bytes); - return ret; -} - -static void zx_dma_issue_pending(struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_dev *d = to_zx_dma(chan->device); - unsigned long flags; - int issue = 0; - - spin_lock_irqsave(&c->vc.lock, flags); - /* add request to vc->desc_issued */ - if (vchan_issue_pending(&c->vc)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - /* if new channel, add chan_pending */ - list_add_tail(&c->node, &d->chan_pending); - issue = 1; - dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); - } - spin_unlock(&d->lock); - } else { - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); - } - spin_unlock_irqrestore(&c->vc.lock, flags); - - if (issue) - zx_dma_task(d); -} - -static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst, - dma_addr_t src, size_t len, u32 num, u32 ccfg) -{ - if ((num + 1) < ds->desc_num) - ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * - sizeof(struct zx_desc_hw); - ds->desc_hw[num].saddr = src; - ds->desc_hw[num].daddr = dst; - ds->desc_hw[num].src_x = len; - ds->desc_hw[num].ctr = ccfg; -} - -static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num, - struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_desc_sw *ds; - struct zx_dma_dev *d = to_zx_dma(chan->device); - int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw); - - if (num > lli_limit) { - dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n", - &c->vc, num, lli_limit); - return NULL; - } - - ds = kzalloc(sizeof(*ds), GFP_ATOMIC); - if (!ds) - return NULL; - - ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); - if (!ds->desc_hw) { - dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); - kfree(ds); - return NULL; - } - ds->desc_num = num; - return ds; -} - -static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width) -{ - switch (width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - case DMA_SLAVE_BUSWIDTH_2_BYTES: - case DMA_SLAVE_BUSWIDTH_4_BYTES: - case DMA_SLAVE_BUSWIDTH_8_BYTES: - return ffs(width) - 1; - default: - return ZX_DMA_WIDTH_32BIT; - } -} - -static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir) -{ - struct dma_slave_config *cfg = &c->slave_cfg; - enum zx_dma_burst_width src_width; - enum zx_dma_burst_width dst_width; - u32 maxburst = 0; - - switch (dir) { - case DMA_MEM_TO_MEM: - c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ - | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1) - | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT) - | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT); - break; - case DMA_MEM_TO_DEV: - c->dev_addr = cfg->dst_addr; - /* dst len is calculated from src width, len and dst width. - * We need to make sure the dst len does not exceed MAX LEN. - * A trailing single transaction that does not fill a full - * burst also requires identical src/dst data width. 
- */ - dst_width = zx_dma_burst_width(cfg->dst_addr_width); - maxburst = cfg->dst_maxburst; - maxburst = maxburst < ZX_MAX_BURST_LEN ? - maxburst : ZX_MAX_BURST_LEN; - c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE - | ZX_SRC_BURST_LEN(maxburst - 1) - | ZX_SRC_BURST_WIDTH(dst_width) - | ZX_DST_BURST_WIDTH(dst_width); - break; - case DMA_DEV_TO_MEM: - c->dev_addr = cfg->src_addr; - src_width = zx_dma_burst_width(cfg->src_addr_width); - maxburst = cfg->src_maxburst; - maxburst = maxburst < ZX_MAX_BURST_LEN ? - maxburst : ZX_MAX_BURST_LEN; - c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE - | ZX_SRC_BURST_LEN(maxburst - 1) - | ZX_SRC_BURST_WIDTH(src_width) - | ZX_DST_BURST_WIDTH(src_width); - break; - default: - return -EINVAL; - } - return 0; -} - -static struct dma_async_tx_descriptor *zx_dma_prep_memcpy( - struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_desc_sw *ds; - size_t copy = 0; - int num = 0; - - if (!len) - return NULL; - - if (zx_pre_config(c, DMA_MEM_TO_MEM)) - return NULL; - - num = DIV_ROUND_UP(len, DMA_MAX_SIZE); - - ds = zx_alloc_desc_resource(num, chan); - if (!ds) - return NULL; - - ds->size = len; - num = 0; - - do { - copy = min_t(size_t, len, DMA_MAX_SIZE); - zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); - - src += copy; - dst += copy; - len -= copy; - } while (len); - - c->cyclic = 0; - ds->desc_hw[num - 1].lli = 0; /* end of link */ - ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; - return vchan_tx_prep(&c->vc, &ds->vd, flags); -} - -static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg( - struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, - enum dma_transfer_direction dir, unsigned long flags, void *context) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_desc_sw *ds; - size_t len, avail, total = 0; - struct scatterlist *sg; - dma_addr_t addr, src = 0, dst = 0; - int num = sglen, i; - - if (!sgl) - return NULL; - - if (zx_pre_config(c, dir)) - return NULL; - - for_each_sg(sgl, sg, sglen, i) { - avail = sg_dma_len(sg); - if (avail > DMA_MAX_SIZE) - num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; - } - - ds = zx_alloc_desc_resource(num, chan); - if (!ds) - return NULL; - - c->cyclic = 0; - num = 0; - for_each_sg(sgl, sg, sglen, i) { - addr = sg_dma_address(sg); - avail = sg_dma_len(sg); - total += avail; - - do { - len = min_t(size_t, avail, DMA_MAX_SIZE); - - if (dir == DMA_MEM_TO_DEV) { - src = addr; - dst = c->dev_addr; - } else if (dir == DMA_DEV_TO_MEM) { - src = c->dev_addr; - dst = addr; - } - - zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); - - addr += len; - avail -= len; - } while (avail); - } - - ds->desc_hw[num - 1].lli = 0; /* end of link */ - ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; - ds->size = total; - return vchan_tx_prep(&c->vc, &ds->vd, flags); -} - -static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic( - struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, - size_t period_len, enum dma_transfer_direction dir, - unsigned long flags) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_desc_sw *ds; - dma_addr_t src = 0, dst = 0; - int num_periods = buf_len / period_len; - int buf = 0, num = 0; - - if (period_len > DMA_MAX_SIZE) { - dev_err(chan->device->dev, "maximum period size exceeded\n"); - return NULL; - } - - if (zx_pre_config(c, dir)) - return NULL; - - ds = zx_alloc_desc_resource(num_periods, chan); - if (!ds) - return NULL; - c->cyclic = 1; - - while (buf < buf_len) { - 
if (dir == DMA_MEM_TO_DEV) { - src = dma_addr; - dst = c->dev_addr; - } else if (dir == DMA_DEV_TO_MEM) { - src = c->dev_addr; - dst = dma_addr; - } - zx_dma_fill_desc(ds, dst, src, period_len, num++, - c->ccfg | ZX_IRQ_ENABLE_ALL); - dma_addr += period_len; - buf += period_len; - } - - ds->desc_hw[num - 1].lli = ds->desc_hw_lli; - ds->size = buf_len; - return vchan_tx_prep(&c->vc, &ds->vd, flags); -} - -static int zx_dma_config(struct dma_chan *chan, - struct dma_slave_config *cfg) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - - if (!cfg) - return -EINVAL; - - memcpy(&c->slave_cfg, cfg, sizeof(*cfg)); - - return 0; -} - -static int zx_dma_terminate_all(struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - struct zx_dma_dev *d = to_zx_dma(chan->device); - struct zx_dma_phy *p = c->phy; - unsigned long flags; - LIST_HEAD(head); - - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); - - /* Prevent this channel being scheduled */ - spin_lock(&d->lock); - list_del_init(&c->node); - spin_unlock(&d->lock); - - /* Clear the tx descriptor lists */ - spin_lock_irqsave(&c->vc.lock, flags); - vchan_get_all_descriptors(&c->vc, &head); - if (p) { - /* vchan is assigned to a pchan - stop the channel */ - zx_dma_terminate_chan(p, d); - c->phy = NULL; - p->vchan = NULL; - p->ds_run = NULL; - p->ds_done = NULL; - } - spin_unlock_irqrestore(&c->vc.lock, flags); - vchan_dma_desc_free_list(&c->vc, &head); - - return 0; -} - -static int zx_dma_transfer_pause(struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - u32 val = 0; - - val = readl_relaxed(c->phy->base + REG_ZX_CTRL); - val &= ~ZX_CH_ENABLE; - writel_relaxed(val, c->phy->base + REG_ZX_CTRL); - - return 0; -} - -static int zx_dma_transfer_resume(struct dma_chan *chan) -{ - struct zx_dma_chan *c = to_zx_chan(chan); - u32 val = 0; - - val = readl_relaxed(c->phy->base + REG_ZX_CTRL); - val |= ZX_CH_ENABLE; - writel_relaxed(val, c->phy->base + REG_ZX_CTRL); - - return 0; -} - -static void zx_dma_free_desc(struct virt_dma_desc *vd) -{ - struct zx_dma_desc_sw *ds = - container_of(vd, struct zx_dma_desc_sw, vd); - struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device); - - dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); - kfree(ds); -} - -static const struct of_device_id zx6702_dma_dt_ids[] = { - { .compatible = "zte,zx296702-dma", }, - {} -}; -MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids); - -static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct zx_dma_dev *d = ofdma->of_dma_data; - unsigned int request = dma_spec->args[0]; - struct dma_chan *chan; - struct zx_dma_chan *c; - - if (request >= d->dma_requests) - return NULL; - - chan = dma_get_any_slave_channel(&d->slave); - if (!chan) { - dev_err(d->slave.dev, "get channel fail in %s.\n", __func__); - return NULL; - } - c = to_zx_chan(chan); - c->id = request; - dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n", - c->id, &c->vc); - return chan; -} - -static int zx_dma_probe(struct platform_device *op) -{ - struct zx_dma_dev *d; - int i, ret = 0; - - d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); - if (!d) - return -ENOMEM; - - d->base = devm_platform_ioremap_resource(op, 0); - if (IS_ERR(d->base)) - return PTR_ERR(d->base); - - of_property_read_u32((&op->dev)->of_node, - "dma-channels", &d->dma_channels); - of_property_read_u32((&op->dev)->of_node, - "dma-requests", &d->dma_requests); - if (!d->dma_requests || !d->dma_channels) - return -EINVAL; - - d->clk = devm_clk_get(&op->dev, 
NULL); - if (IS_ERR(d->clk)) { - dev_err(&op->dev, "no dma clk\n"); - return PTR_ERR(d->clk); - } - - d->irq = platform_get_irq(op, 0); - ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler, - 0, DRIVER_NAME, d); - if (ret) - return ret; - - /* A DMA memory pool for LLIs, align on 32-byte boundary */ - d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, - LLI_BLOCK_SIZE, 32, 0); - if (!d->pool) - return -ENOMEM; - - /* init phy channel */ - d->phy = devm_kcalloc(&op->dev, - d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL); - if (!d->phy) - return -ENOMEM; - - for (i = 0; i < d->dma_channels; i++) { - struct zx_dma_phy *p = &d->phy[i]; - - p->idx = i; - p->base = d->base + i * 0x40; - } - - INIT_LIST_HEAD(&d->slave.channels); - dma_cap_set(DMA_SLAVE, d->slave.cap_mask); - dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); - dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); - dma_cap_set(DMA_PRIVATE, d->slave.cap_mask); - d->slave.dev = &op->dev; - d->slave.device_free_chan_resources = zx_dma_free_chan_resources; - d->slave.device_tx_status = zx_dma_tx_status; - d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy; - d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg; - d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic; - d->slave.device_issue_pending = zx_dma_issue_pending; - d->slave.device_config = zx_dma_config; - d->slave.device_terminate_all = zx_dma_terminate_all; - d->slave.device_pause = zx_dma_transfer_pause; - d->slave.device_resume = zx_dma_transfer_resume; - d->slave.copy_align = DMA_ALIGN; - d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS; - d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS; - d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV) - | BIT(DMA_DEV_TO_MEM); - d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; - - /* init virtual channel */ - d->chans = devm_kcalloc(&op->dev, - d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL); - if (!d->chans) - return -ENOMEM; - - for (i = 0; i < d->dma_requests; i++) { - struct zx_dma_chan *c = &d->chans[i]; - - c->status = DMA_IN_PROGRESS; - INIT_LIST_HEAD(&c->node); - c->vc.desc_free = zx_dma_free_desc; - vchan_init(&c->vc, &d->slave); - } - - /* Enable clock before accessing registers */ - ret = clk_prepare_enable(d->clk); - if (ret < 0) { - dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); - goto zx_dma_out; - } - - zx_dma_init_state(d); - - spin_lock_init(&d->lock); - INIT_LIST_HEAD(&d->chan_pending); - platform_set_drvdata(op, d); - - ret = dma_async_device_register(&d->slave); - if (ret) - goto clk_dis; - - ret = of_dma_controller_register((&op->dev)->of_node, - zx_of_dma_simple_xlate, d); - if (ret) - goto of_dma_register_fail; - - dev_info(&op->dev, "initialized\n"); - return 0; - -of_dma_register_fail: - dma_async_device_unregister(&d->slave); -clk_dis: - clk_disable_unprepare(d->clk); -zx_dma_out: - return ret; -} - -static int zx_dma_remove(struct platform_device *op) -{ - struct zx_dma_chan *c, *cn; - struct zx_dma_dev *d = platform_get_drvdata(op); - - /* explicitly free the irq */ - devm_free_irq(&op->dev, d->irq, d); - - dma_async_device_unregister(&d->slave); - of_dma_controller_free((&op->dev)->of_node); - - list_for_each_entry_safe(c, cn, &d->slave.channels, - vc.chan.device_node) { - list_del(&c->vc.chan.device_node); - } - clk_disable_unprepare(d->clk); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int zx_dma_suspend_dev(struct device *dev) -{ - struct zx_dma_dev *d = dev_get_drvdata(dev); - u32 stat = 0; - - stat = zx_dma_get_chan_stat(d); - if (stat) { - 
dev_warn(d->slave.dev, - "chan %d is running fail to suspend\n", stat); - return -1; - } - clk_disable_unprepare(d->clk); - return 0; -} - -static int zx_dma_resume_dev(struct device *dev) -{ - struct zx_dma_dev *d = dev_get_drvdata(dev); - int ret = 0; - - ret = clk_prepare_enable(d->clk); - if (ret < 0) { - dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); - return ret; - } - zx_dma_init_state(d); - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev); - -static struct platform_driver zx_pdma_driver = { - .driver = { - .name = DRIVER_NAME, - .pm = &zx_dma_pmops, - .of_match_table = zx6702_dma_dt_ids, - }, - .probe = zx_dma_probe, - .remove = zx_dma_remove, -}; - -module_platform_driver(zx_pdma_driver); - -MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver"); -MODULE_AUTHOR("Jun Nie jun.nie@linaro.org"); -MODULE_LICENSE("GPL v2"); diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 36e22c5a0f29..5f106d852f1c 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -42,14 +42,14 @@ enum psil_endpoint_type { /** * struct psil_endpoint_config - PSI-L Endpoint configuration * @ep_type: PSI-L endpoint type + * @channel_tpl: Desired throughput level for the channel * @pkt_mode: If set, the channel must be in Packet mode, otherwise in * TR mode * @notdpkt: TDCM must be suppressed on the TX channel * @needs_epib: Endpoint needs EPIB - * @psd_size: If set, PSdata is used by the endpoint - * @channel_tpl: Desired throughput level for the channel * @pdma_acc32: ACC32 must be enabled on the PDMA side * @pdma_burst: BURST must be enabled on the PDMA side + * @psd_size: If set, PSdata is used by the endpoint * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels. 
* The thread must be serviced by the specified channel if * mapped_channel_id is >= 0 in case of PKTDMA @@ -62,23 +62,22 @@ enum psil_endpoint_type { */ struct psil_endpoint_config { enum psil_endpoint_type ep_type; + enum udma_tp_level channel_tpl; unsigned pkt_mode:1; unsigned notdpkt:1; unsigned needs_epib:1; - u32 psd_size; - enum udma_tp_level channel_tpl; - /* PDMA properties, valid for PSIL_EP_PDMA_* */ unsigned pdma_acc32:1; unsigned pdma_burst:1; + u32 psd_size; /* PKDMA mapped channel */ - int mapped_channel_id; + s16 mapped_channel_id; /* PKTDMA tflow and rflow ranges for mapped channel */ u16 flow_start; u16 flow_num; - u16 default_flow_id; + s16 default_flow_id; }; int psil_set_new_ep_config(struct device *dev, const char *name, diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h deleted file mode 100644 index 25cab62a28c4..000000000000 --- a/include/linux/dma/mmp-pdma.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MMP_PDMA_H_ -#define _MMP_PDMA_H_ - -struct dma_chan; - -#ifdef CONFIG_MMP_PDMA -bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param); -#else -static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) -{ - return false; -} -#endif - -#endif /* _MMP_PDMA_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 68130f5f599e..004736b6a9c8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -745,6 +745,8 @@ enum dmaengine_alignment { DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, }; /** diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h deleted file mode 100644 index 069637e6004f..000000000000 --- a/include/linux/platform_data/dma-atmel.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Header file for the Atmel AHB DMA Controller driver - * - * Copyright (C) 2008 Atmel Corporation - */ -#ifndef AT_HDMAC_H -#define AT_HDMAC_H - -#include <linux/dmaengine.h> - -/** - * struct at_dma_platform_data - Controller configuration parameters - * @nr_channels: Number of channels supported by hardware (max 8) - * @cap_mask: dma_capability flags supported by the platform - */ -struct at_dma_platform_data { - unsigned int nr_channels; - dma_cap_mask_t cap_mask; -}; - -/** - * struct at_dma_slave - Controller-specific information about a slave - * @dma_dev: required DMA master device - * @cfg: Platform-specific initializer for the CFG register - */ -struct at_dma_slave { - struct device *dma_dev; - u32 cfg; -}; - - -/* Platform-configurable bits in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - -#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ -#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ -#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ -#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ -#define ATC_SRC_H2SEL_SW (0x0 << 9) -#define ATC_SRC_H2SEL_HW (0x1 << 9) -#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ -#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ -#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ -#define ATC_DST_H2SEL_SW (0x0 << 13) -#define ATC_DST_H2SEL_HW (0x1 << 13) -#define 
ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ -#define ATC_SOD (0x1 << 16) /* Stop On Done */ -#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ -#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ -#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ -#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) -#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) -#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ -#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ -#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) -#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) -#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) - - -#endif /* AT_HDMAC_H */ diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h deleted file mode 100644 index 4cca529f8d56..000000000000 --- a/include/linux/platform_data/dma-coh901318.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Platform data for the COH901318 DMA controller - * Copyright (C) 2007-2013 ST-Ericsson - */ - -#ifndef PLAT_COH901318_H -#define PLAT_COH901318_H - -#ifdef CONFIG_COH901318 - -/* We only support the U300 DMA channels */ -#define U300_DMA_MSL_TX_0 0 -#define U300_DMA_MSL_TX_1 1 -#define U300_DMA_MSL_TX_2 2 -#define U300_DMA_MSL_TX_3 3 -#define U300_DMA_MSL_TX_4 4 -#define U300_DMA_MSL_TX_5 5 -#define U300_DMA_MSL_TX_6 6 -#define U300_DMA_MSL_RX_0 7 -#define U300_DMA_MSL_RX_1 8 -#define U300_DMA_MSL_RX_2 9 -#define U300_DMA_MSL_RX_3 10 -#define U300_DMA_MSL_RX_4 11 -#define U300_DMA_MSL_RX_5 12 -#define U300_DMA_MSL_RX_6 13 -#define U300_DMA_MMCSD_RX_TX 14 -#define U300_DMA_MSPRO_TX 15 -#define U300_DMA_MSPRO_RX 16 -#define U300_DMA_UART0_TX 17 -#define U300_DMA_UART0_RX 18 -#define U300_DMA_APEX_TX 19 -#define U300_DMA_APEX_RX 20 -#define U300_DMA_PCM_I2S0_TX 21 -#define U300_DMA_PCM_I2S0_RX 22 -#define U300_DMA_PCM_I2S1_TX 23 -#define U300_DMA_PCM_I2S1_RX 24 -#define U300_DMA_XGAM_CDI 25 -#define U300_DMA_XGAM_PDI 26 -#define U300_DMA_SPI_TX 27 -#define U300_DMA_SPI_RX 28 -#define U300_DMA_GENERAL_PURPOSE_0 29 -#define U300_DMA_GENERAL_PURPOSE_1 30 -#define U300_DMA_GENERAL_PURPOSE_2 31 -#define U300_DMA_GENERAL_PURPOSE_3 32 -#define U300_DMA_GENERAL_PURPOSE_4 33 -#define U300_DMA_GENERAL_PURPOSE_5 34 -#define U300_DMA_GENERAL_PURPOSE_6 35 -#define U300_DMA_GENERAL_PURPOSE_7 36 -#define U300_DMA_GENERAL_PURPOSE_8 37 -#define U300_DMA_UART1_TX 38 -#define U300_DMA_UART1_RX 39 - -#define U300_DMA_DEVICE_CHANNELS 32 -#define U300_DMA_CHANNELS 40 - -/** - * coh901318_filter_id() - DMA channel filter function - * @chan: dma channel handle - * @chan_id: id of dma channel to be filter out - * - * In dma_request_channel() it specifies what channel id to be requested - */ -bool coh901318_filter_id(struct dma_chan *chan, void *chan_id); -#else -static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) -{ - return false; -} -#endif - -#endif /* PLAT_COH901318_H */ diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index 30e676b36b24..725602d9df91 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -57,15 +57,4 @@ struct sdma_script_start_addrs { /* End of v4 array */ }; -/** - * struct sdma_platform_data - platform specific data for SDMA engine - * - * @fw_name The firmware name - * @script_addrs SDMA scripts addresses in SDMA ROM - */ -struct sdma_platform_data { - char *fw_name; - struct sdma_script_start_addrs 
*script_addrs; -}; - #endif /* __MACH_MXC_SDMA_H__ */ diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h deleted file mode 100644 index 50161b6afb61..000000000000 --- a/include/linux/sirfsoc_dma.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _SIRFSOC_DMA_H_ -#define _SIRFSOC_DMA_H_ - -bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id); - -#endif
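
With sirfsoc_dma.h, dma/mmp-pdma.h and platform_data/dma-coh901318.h deleted, the exported filter functions they declared (sirfsoc_dma_filter_id(), mmp_pdma_filter_fn(), coh901318_filter_id()) are gone along with their drivers. Remaining clients look channels up through the device tree instead. Below is a minimal sketch of that DT-based lookup; the "rx" channel name, the FIFO address parameter and the slave-config values are illustrative only and are not taken from any driver touched by this patch:

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	/* Hypothetical client; assumes a dma-names entry called "rx" */
	static int example_request_rx_chan(struct device *dev, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo_addr,	/* device FIFO (illustrative) */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,
		};
		struct dma_chan *chan;
		int ret;

		/* Resolves the channel via the client's dmas/dma-names properties */
		chan = dma_request_chan(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret) {
			dma_release_channel(chan);
			return ret;
		}

		/* ... prep a descriptor, submit it, dma_async_issue_pending() ... */
		dma_release_channel(chan);
		return 0;
	}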
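
The k3-udma hunks above derive copy_align from the largest burst size the device supports, which is why DMAENGINE_ALIGN_128_BYTES and DMAENGINE_ALIGN_256_BYTES are added to dmaengine.h. A memcpy client can check a candidate transfer against whatever alignment the engine advertises; the sketch below assumes a channel has already been requested and relies on the existing is_dma_copy_aligned() helper from dmaengine.h:

	/* Hypothetical helper: decide whether a copy can be offloaded */
	static bool example_can_offload_copy(struct dma_chan *chan,
					     size_t src_off, size_t dst_off,
					     size_t len)
	{
		/*
		 * copy_align may now be as strict as 256 bytes (J721E main
		 * UDMA); callers should fall back to memcpy() when the
		 * offsets or length do not satisfy it.
		 */
		return is_dma_copy_aligned(chan->device, src_off, dst_off, len);
	}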