Merge tag 'dmaengine-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "Core:
   - New devm_of_dma_controller_register() API

  New Support:
   - Support for RZ/G3L SoC
   - Loongson Multi-Channel DMA controller support
   - Conversion of Xilinx AXI DMA binding
   - DW AXI CV1800B DMA support
   - Switchtec DMA engine driver

  Updates:
   - AMD MDB Endpoint and non-LL mode support
   - DW edma virtual IRQ for interrupt-emulation, cyclic transfers support"

* tag 'dmaengine-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (65 commits)
  dmaengine: dw-edma: Add non-LL mode
  dmaengine: dw-edma: Add AMD MDB Endpoint Support
  dt-bindings: dmaengine: Fix spelling mistake "Looongson" -> "Loongson"
  dmaengine: loongson: Fix spelling mistake "Looongson" -> "Loongson"
  dmaengine: loongson: New driver for the Loongson Multi-Channel DMA controller
  dt-bindings: dmaengine: Add Loongson Multi-Channel DMA controller
  dmaengine: loongson: loongson2-apb: Simplify locking with guard() and scoped_guard()
  dmaengine: loongson: loongson2-apb: Convert to devm_clk_get_enabled()
  dmaengine: loongson: loongson2-apb: Convert to dmaenginem_async_device_register()
  dmaengine: loongson: New directory for Loongson DMA controllers drivers
  dt-bindings: dma: xlnx,axi-dma: Convert to DT schema
  dt-bindings: dma: rz-dmac: Add conditional schema for RZ/G3L
  dmaengine: sh: rz-dmac: Add device_{pause,resume}() callbacks
  dmaengine: sh: rz-dmac: Add device_tx_status() callback
  dmaengine: sh: rz-dmac: Use rz_lmdesc_setup() to invalidate descriptors
  dmaengine: sh: rz-dmac: Drop unnecessary local_irq_save() call
  dmaengine: sh: rz-dmac: Drop goto instruction and label
  dmaengine: sh: rz-dmac: Drop read of CHCTRL register
  dmaengine: sh: rz_dmac: add RZ/{T2H,N2H} support
  dt-bindings: dma: renesas,rz-dmac: document RZ/{T2H,N2H}
  ...
This commit is contained in:
Linus Torvalds
2026-04-17 10:29:01 -07:00
40 changed files with 3852 additions and 534 deletions

View File

@@ -0,0 +1,81 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/loongson,ls2k0300-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Loongson-2 Multi-Channel DMA controller

description:
  The Loongson-2 Multi-Channel DMA controller is used for transferring data
  between system memory and the peripherals on the APB bus.

maintainers:
  - Binbin Zhou <zhoubinbin@loongson.cn>

allOf:
  - $ref: dma-controller.yaml#

properties:
  compatible:
    enum:
      - loongson,ls2k0300-dma
      - loongson,ls2k3000-dma

  reg:
    maxItems: 1

  interrupts:
    description:
      Should contain all of the per-channel DMA interrupts in ascending order
      with respect to the DMA channel index.
    minItems: 4
    maxItems: 8

  clocks:
    maxItems: 1

  '#dma-cells':
    const: 2
    description: |
      DMA request from clients consists of 2 cells:
        1. Channel index
        2. Transfer request factor number, If no transfer factor, use 0.
           The number is SoC-specific, and this should be specified with
           relation to the device to use the DMA controller.

  dma-channels:
    enum: [4, 8]

required:
  - compatible
  - reg
  - interrupts
  - clocks
  - '#dma-cells'
  - dma-channels

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/clock/loongson,ls2k-clk.h>

    dma-controller@1612c000 {
        compatible = "loongson,ls2k0300-dma";
        reg = <0x1612c000 0xff>;
        interrupt-parent = <&liointc0>;
        interrupts = <23 IRQ_TYPE_LEVEL_HIGH>,
                     <24 IRQ_TYPE_LEVEL_HIGH>,
                     <25 IRQ_TYPE_LEVEL_HIGH>,
                     <26 IRQ_TYPE_LEVEL_HIGH>,
                     <27 IRQ_TYPE_LEVEL_HIGH>,
                     <28 IRQ_TYPE_LEVEL_HIGH>,
                     <29 IRQ_TYPE_LEVEL_HIGH>,
                     <30 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk LS2K0300_CLK_APB_GATE>;
        #dma-cells = <2>;
        dma-channels = <8>;
    };
...

View File

@@ -19,6 +19,7 @@ properties:
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
- renesas,r9a08g045-dmac # RZ/G3S
- renesas,r9a08g046-dmac # RZ/G3L
- const: renesas,rz-dmac
- items:
@@ -29,6 +30,13 @@ properties:
- const: renesas,r9a09g057-dmac # RZ/V2H(P)
- const: renesas,r9a09g077-dmac # RZ/T2H
- items:
- enum:
- renesas,r9a09g087-dmac # RZ/N2H
- const: renesas,r9a09g077-dmac
reg:
items:
- description: Control and channel register block
@@ -36,27 +44,12 @@ properties:
minItems: 1
interrupts:
minItems: 16
maxItems: 17
interrupt-names:
items:
- const: error
- const: ch0
- const: ch1
- const: ch2
- const: ch3
- const: ch4
- const: ch5
- const: ch6
- const: ch7
- const: ch8
- const: ch9
- const: ch10
- const: ch11
- const: ch12
- const: ch13
- const: ch14
- const: ch15
minItems: 16
maxItems: 17
clocks:
items:
@@ -122,6 +115,35 @@ required:
allOf:
- $ref: dma-controller.yaml#
- if:
properties:
compatible:
contains:
enum:
- renesas,rz-dmac
- renesas,r9a09g057-dmac
then:
properties:
interrupt-names:
items:
- const: error
- const: ch0
- const: ch1
- const: ch2
- const: ch3
- const: ch4
- const: ch5
- const: ch6
- const: ch7
- const: ch8
- const: ch9
- const: ch10
- const: ch11
- const: ch12
- const: ch13
- const: ch14
- const: ch15
- if:
properties:
compatible:
@@ -131,6 +153,7 @@ allOf:
- renesas,r9a07g044-dmac
- renesas,r9a07g054-dmac
- renesas,r9a08g045-dmac
- renesas,r9a08g046-dmac
then:
properties:
reg:
@@ -189,6 +212,49 @@ allOf:
- renesas,icu
- resets
- if:
properties:
compatible:
contains:
const: renesas,r9a09g077-dmac
then:
properties:
reg:
maxItems: 1
clocks:
maxItems: 1
clock-names: false
resets: false
reset-names: false
interrupts:
maxItems: 16
interrupt-names:
items:
- const: ch0
- const: ch1
- const: ch2
- const: ch3
- const: ch4
- const: ch5
- const: ch6
- const: ch7
- const: ch8
- const: ch9
- const: ch10
- const: ch11
- const: ch12
- const: ch13
- const: ch14
- const: ch15
required:
- clocks
- power-domains
- renesas,icu
additionalProperties: false
examples:

View File

@@ -21,6 +21,7 @@ properties:
- enum:
- snps,axi-dma-1.01a
- intel,kmb-axi-dma
- sophgo,cv1800b-axi-dma
- starfive,jh7110-axi-dma
- starfive,jh8100-axi-dma
- items:
@@ -68,6 +69,8 @@ properties:
dma-noncoherent: true
dma-coherent: true
resets:
minItems: 1
maxItems: 2

View File

@@ -1,111 +0,0 @@
Xilinx AXI VDMA engine, it does transfers between memory and video devices.
It can be configured to have one channel or two channels. If configured
as two channels, one is to transmit to the video device and another is
to receive from the video device.
Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream
target devices. It can be configured to have one channel or two channels.
If configured as two channels, one is to transmit to the device and another
is to receive from the device.
Xilinx AXI CDMA engine, it does transfers between memory-mapped source
address and a memory-mapped destination address.
Xilinx AXI MCDMA engine, it does transfer between memory and AXI4 stream
target devices. It can be configured to have up to 16 independent transmit
and receive channels.
Required properties:
- compatible: Should be one of-
"xlnx,axi-vdma-1.00.a"
"xlnx,axi-dma-1.00.a"
"xlnx,axi-cdma-1.00.a"
"xlnx,axi-mcdma-1.00.a"
- #dma-cells: Should be <1>, see "dmas" property below
- reg: Should contain VDMA registers location and length.
- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
- dma-ranges: Should be as the following <dma_addr cpu_addr max_len>.
- dma-channel child node: Should have at least one channel and can have up to
two channels per device. This node specifies the properties of each
DMA channel (see child node properties below).
- clocks: Input clock specifier. Refer to common clock bindings.
- clock-names: List of input clocks
For VDMA:
Required elements: "s_axi_lite_aclk"
Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk",
"m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
For CDMA:
Required elements: "s_axi_lite_aclk", "m_axi_aclk"
For AXIDMA and MCDMA:
Required elements: "s_axi_lite_aclk"
Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
"m_axi_sg_aclk"
Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
Optional properties for AXI DMA and MCDMA:
- xlnx,sg-length-width: Should be set to the width in bits of the length
register as configured in h/w. Takes values {8...26}. If the property
is missing or invalid then the default value 23 is used. This is the
maximum value that is supported by all IP versions.
Optional properties for AXI DMA:
- xlnx,axistream-connected: Tells whether DMA is connected to AXI stream IP.
- xlnx,irq-delay: Tells the interrupt delay timeout value. Valid range is from
0-255. Setting this value to zero disables the delay timer interrupt.
1 timeout interval = 125 * clock period of SG clock.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
{1}, flush both channels
{2}, flush mm2s channel
{3}, flush s2mm channel
Required child node properties:
- compatible:
For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or
"xlnx,axi-vdma-s2mm-channel".
For CDMA: It should be "xlnx,axi-cdma-channel".
For AXIDMA and MCDMA: It should be either "xlnx,axi-dma-mm2s-channel"
or "xlnx,axi-dma-s2mm-channel".
- interrupts: Should contain per channel VDMA interrupts.
- xlnx,datawidth: Should contain the stream data width, take values
{32,64...1024}.
Optional child node properties:
- xlnx,include-dre: Tells hardware is configured for Data
Realignment Engine.
Optional child node properties for VDMA:
- xlnx,genlock-mode: Tells Genlock synchronization is
enabled/disabled in hardware.
- xlnx,enable-vert-flip: Tells vertical flip is
enabled/disabled in hardware(S2MM path).
Optional child node properties for MCDMA:
- dma-channels: Number of dma channels in child node.
Example:
++++++++
axi_vdma_0: axivdma@40030000 {
compatible = "xlnx,axi-vdma-1.00.a";
#dma_cells = <1>;
reg = < 0x40030000 0x10000 >;
dma-ranges = <0x00000000 0x00000000 0x40000000>;
xlnx,num-fstores = <0x8>;
xlnx,flush-fsync = <0x1>;
xlnx,addrwidth = <0x20>;
clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
"m_axis_mm2s_aclk", "s_axis_s2mm_aclk";
dma-channel@40030000 {
compatible = "xlnx,axi-vdma-mm2s-channel";
interrupts = < 0 54 4 >;
xlnx,datawidth = <0x40>;
} ;
dma-channel@40030030 {
compatible = "xlnx,axi-vdma-s2mm-channel";
interrupts = < 0 53 4 >;
xlnx,datawidth = <0x40>;
} ;
} ;

View File

@@ -0,0 +1,299 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/xilinx/xlnx,axi-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Xilinx AXI VDMA, DMA, CDMA and MCDMA IP

maintainers:
  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
  - Abin Joseph <abin.joseph@amd.com>

description: >
  Xilinx AXI VDMA engine, it does transfers between memory and video devices.
  It can be configured to have one channel or two channels. If configured
  as two channels, one is to transmit to the video device and another is
  to receive from the video device.

  Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream
  target devices. It can be configured to have one channel or two channels.
  If configured as two channels, one is to transmit to the device and another
  is to receive from the device.

  Xilinx AXI CDMA engine, it does transfers between memory-mapped source
  address and a memory-mapped destination address.

  Xilinx AXI MCDMA engine, it does transfer between memory and AXI4 stream
  target devices. It can be configured to have up to 16 independent transmit
  and receive channels.

properties:
  compatible:
    enum:
      - xlnx,axi-cdma-1.00.a
      - xlnx,axi-dma-1.00.a
      - xlnx,axi-mcdma-1.00.a
      - xlnx,axi-vdma-1.00.a

  reg:
    maxItems: 1

  "#dma-cells":
    const: 1

  "#address-cells":
    const: 1

  "#size-cells":
    const: 1

  interrupts:
    items:
      - description: Interrupt for single channel (MM2S or S2MM)
      - description: Interrupt for dual channel configuration
    minItems: 1
    description:
      Interrupt lines for the DMA controller. Only used when
      xlnx,axistream-connected is present (DMA connected to AXI Stream
      IP). When child dma-channel nodes are present, interrupts are
      specified in the child nodes instead.

  clocks:
    minItems: 1
    maxItems: 5

  clock-names:
    minItems: 1
    maxItems: 5

  dma-ranges: true

  xlnx,addrwidth:
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [32, 64]
    description: The DMA addressing size in bits.

  xlnx,num-fstores:
    $ref: /schemas/types.yaml#/definitions/uint32
    minimum: 1
    maximum: 32
    description: Should be the number of framebuffers as configured in h/w.

  xlnx,flush-fsync:
    type: boolean
    description: Tells which channel to Flush on Frame sync.

  xlnx,sg-length-width:
    $ref: /schemas/types.yaml#/definitions/uint32
    minimum: 8
    maximum: 26
    default: 23
    description:
      Width in bits of the length register as configured in hardware.

  xlnx,irq-delay:
    $ref: /schemas/types.yaml#/definitions/uint32
    minimum: 0
    maximum: 255
    description:
      Tells the interrupt delay timeout value. Valid range is from 0-255.
      Setting this value to zero disables the delay timer interrupt.
      1 timeout interval = 125 * clock period of SG clock.

  xlnx,axistream-connected:
    type: boolean
    description: Tells whether DMA is connected to AXI stream IP.

patternProperties:
  "^dma-channel(-mm2s|-s2mm)?$":
    type: object
    description:
      Should have at least one channel and can have up to two channels per
      device. This node specifies the properties of each DMA channel.
    properties:
      compatible:
        enum:
          - xlnx,axi-vdma-mm2s-channel
          - xlnx,axi-vdma-s2mm-channel
          - xlnx,axi-cdma-channel
          - xlnx,axi-dma-mm2s-channel
          - xlnx,axi-dma-s2mm-channel

      interrupts:
        maxItems: 1

      xlnx,datawidth:
        $ref: /schemas/types.yaml#/definitions/uint32
        enum: [32, 64, 128, 256, 512, 1024]
        description: Should contain the stream data width, take values {32,64...1024}.

      xlnx,include-dre:
        type: boolean
        description: Tells hardware is configured for Data Realignment Engine.

      xlnx,genlock-mode:
        type: boolean
        description: Tells Genlock synchronization is enabled/disabled in hardware.

      xlnx,enable-vert-flip:
        type: boolean
        description:
          Tells vertical flip is enabled/disabled in hardware(S2MM path).

      dma-channels:
        $ref: /schemas/types.yaml#/definitions/uint32
        description: Number of dma channels in child node.

    required:
      - compatible
      - interrupts
      - xlnx,datawidth

    additionalProperties: false

allOf:
  - $ref: ../dma-controller.yaml#

  - if:
      properties:
        compatible:
          contains:
            const: xlnx,axi-vdma-1.00.a
    then:
      properties:
        clock-names:
          items:
            - const: s_axi_lite_aclk
            - const: m_axi_mm2s_aclk
            - const: m_axi_s2mm_aclk
            - const: m_axis_mm2s_aclk
            - const: s_axis_s2mm_aclk
          minItems: 1
        interrupts: false
      patternProperties:
        "^dma-channel(-mm2s|-s2mm)?$":
          properties:
            compatible:
              enum:
                - xlnx,axi-vdma-mm2s-channel
                - xlnx,axi-vdma-s2mm-channel
      required:
        - xlnx,num-fstores

  - if:
      properties:
        compatible:
          contains:
            const: xlnx,axi-cdma-1.00.a
    then:
      properties:
        clock-names:
          items:
            - const: s_axi_lite_aclk
            - const: m_axi_aclk
        interrupts: false
      patternProperties:
        "^dma-channel(-mm2s|-s2mm)?$":
          properties:
            compatible:
              enum:
                - xlnx,axi-cdma-channel

  - if:
      properties:
        compatible:
          contains:
            enum:
              - xlnx,axi-dma-1.00.a
              - xlnx,axi-mcdma-1.00.a
    then:
      properties:
        clock-names:
          items:
            - const: s_axi_lite_aclk
            - const: m_axi_mm2s_aclk
            - const: m_axi_s2mm_aclk
            - const: m_axi_sg_aclk
          minItems: 1
      patternProperties:
        "^dma-channel(-mm2s|-s2mm)?(@[0-9a-f]+)?$":
          properties:
            compatible:
              enum:
                - xlnx,axi-dma-mm2s-channel
                - xlnx,axi-dma-s2mm-channel

required:
  - "#dma-cells"
  - reg
  - xlnx,addrwidth
  - dma-ranges
  - clocks
  - clock-names

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    dma-controller@40030000 {
        compatible = "xlnx,axi-vdma-1.00.a";
        reg = <0x40030000 0x10000>;
        #dma-cells = <1>;
        #address-cells = <1>;
        #size-cells = <1>;
        dma-ranges = <0x0 0x0 0x40000000>;
        clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
        clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
                      "m_axi_s2mm_aclk", "m_axis_mm2s_aclk",
                      "s_axis_s2mm_aclk";
        xlnx,num-fstores = <8>;
        xlnx,flush-fsync;
        xlnx,addrwidth = <32>;

        dma-channel-mm2s {
            compatible = "xlnx,axi-vdma-mm2s-channel";
            interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
            xlnx,datawidth = <64>;
        };

        dma-channel-s2mm {
            compatible = "xlnx,axi-vdma-s2mm-channel";
            interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
            xlnx,datawidth = <64>;
        };
    };

  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    dma-controller@a4030000 {
        compatible = "xlnx,axi-dma-1.00.a";
        reg = <0xa4030000 0x10000>;
        #dma-cells = <1>;
        #address-cells = <1>;
        #size-cells = <1>;
        dma-ranges = <0x0 0x0 0x40000000>;
        clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>;
        clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
                      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
        xlnx,addrwidth = <32>;
        xlnx,sg-length-width = <14>;

        dma-channel-mm2s {
            compatible = "xlnx,axi-dma-mm2s-channel";
            interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
            xlnx,datawidth = <64>;
            xlnx,include-dre;
        };

        dma-channel-s2mm {
            compatible = "xlnx,axi-dma-s2mm-channel";
            interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
            xlnx,datawidth = <64>;
            xlnx,include-dre;
        };
    };

View File

@@ -15039,12 +15039,14 @@ S: Maintained
F: Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
F: drivers/gpio/gpio-loongson-64bit.c
LOONGSON-2 APB DMA DRIVER
LOONGSON-2 DMA DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
L: dmaengine@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/dma/loongson,ls2k0300-dma.yaml
F: Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
F: drivers/dma/loongson2-apb-dma.c
F: drivers/dma/loongson/loongson2-apb-cmc-dma.c
F: drivers/dma/loongson/loongson2-apb-dma.c
LOONGSON LS2X I2C DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
@@ -17832,6 +17834,7 @@ F: arch/mips/boot/dts/loongson/loongson1*
F: arch/mips/configs/loongson1_defconfig
F: arch/mips/loongson32/
F: drivers/*/*loongson1*
F: drivers/dma/loongson/loongson1-apb-dma.c
F: drivers/mtd/nand/raw/loongson-nand-controller.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
F: sound/soc/loongson/loongson1_ac97.c
@@ -25724,6 +25727,13 @@ S: Supported
F: include/net/switchdev.h
F: net/switchdev/
SWITCHTEC DMA DRIVER
M: Kelvin Cao <kelvin.cao@microchip.com>
M: Logan Gunthorpe <logang@deltatee.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/switchtec_dma.c
SY8106A REGULATOR DRIVER
M: Icenowy Zheng <icenowy@aosc.io>
S: Maintained

View File

@@ -376,29 +376,6 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
config LOONGSON1_APB_DMA
tristate "Loongson1 APB DMA support"
depends on MACH_LOONGSON32 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
This selects support for the APB DMA controller in Loongson1 SoCs,
which is required by Loongson1 NAND and audio support.
config LOONGSON2_APB_DMA
tristate "Loongson2 APB DMA support"
depends on LOONGARCH || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the Loongson2 APB DMA controller driver. The
DMA controller is having single DMA channel which can be
configured for different peripherals like audio, nand, sdio
etc which is in APB bus.
This DMA controller transfers data from memory to peripheral fifo.
It does not support memory to memory data transfer.
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -505,7 +482,7 @@ config MV_XOR_V2
platforms.
config MXS_DMA
bool "MXS DMA support"
tristate "MXS DMA support"
depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
select STMP_DEVICE
select DMA_ENGINE
@@ -610,6 +587,15 @@ config SPRD_DMA
help
Enable support for the on-chip DMA controller on Spreadtrum platform.
config SWITCHTEC_DMA
tristate "Switchtec PSX/PFX Switch DMA Engine Support"
depends on PCI
select DMA_ENGINE
help
Some Switchtec PSX/PFX PCIe Switches support additional DMA engines.
These are exposed via an extra function on the switch's upstream
port.
config TXX9_DMAC
tristate "Toshiba TXx9 SoC DMA support"
depends on MACH_TX49XX
@@ -774,6 +760,8 @@ source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
source "drivers/dma/lgm/Kconfig"
source "drivers/dma/loongson/Kconfig"
source "drivers/dma/stm32/Kconfig"
# clients

View File

@@ -49,8 +49,6 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
@@ -74,6 +72,7 @@ obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_SWITCHTEC_DMA) += switchtec_dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
@@ -87,6 +86,7 @@ obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/
obj-y += amd/
obj-y += loongson/
obj-y += mediatek/
obj-y += qcom/
obj-y += stm32/

View File

@@ -134,6 +134,7 @@ struct axi_dmac_desc {
struct axi_dmac_chan *chan;
bool cyclic;
bool cyclic_eot;
bool have_partial_xfer;
unsigned int num_submitted;
@@ -162,6 +163,7 @@ struct axi_dmac_chan {
bool hw_cyclic;
bool hw_2d;
bool hw_sg;
bool hw_cyclic_hotfix;
};
struct axi_dmac {
@@ -227,29 +229,94 @@ static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
return true;
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
return list_first_entry_or_null(&chan->active_descs,
struct axi_dmac_desc, vdesc.node);
}
static struct axi_dmac_desc *axi_dmac_get_next_desc(struct axi_dmac *dmac,
struct axi_dmac_chan *chan)
{
struct axi_dmac_desc *active = axi_dmac_active_desc(chan);
struct virt_dma_desc *vdesc;
struct axi_dmac_desc *desc;
unsigned int val;
/*
* Just play safe and ignore any SOF if we have an active cyclic transfer
* flagged to end. We'll start it as soon as the current cyclic one ends.
*/
if (active && active->cyclic_eot)
return NULL;
/*
* It means a SW cyclic transfer is in place so we should just return
* the same descriptor. SW cyclic transfer termination is handled
* in axi_dmac_transfer_done().
*/
if (chan->next_desc)
return chan->next_desc;
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return NULL;
if (active && active->cyclic && !(vdesc->tx.flags & DMA_PREP_LOAD_EOT)) {
struct device *dev = chan_to_axi_dmac(chan)->dma_dev.dev;
dev_warn(dev, "Discarding non EOT transfer after cyclic\n");
list_del(&vdesc->node);
return NULL;
}
list_move_tail(&vdesc->node, &chan->active_descs);
desc = to_axi_dmac_desc(vdesc);
chan->next_desc = desc;
if (!active || !active->cyclic)
return desc;
active->cyclic_eot = true;
if (chan->hw_sg) {
unsigned long flags = AXI_DMAC_HW_FLAG_IRQ | AXI_DMAC_HW_FLAG_LAST;
/*
* Let's then stop the current cyclic transfer by making sure we
* get an EOT interrupt and to open the cyclic loop by marking
* the last segment.
*/
active->sg[active->num_sgs - 1].hw->flags = flags;
return NULL;
}
/*
* Clear the cyclic bit if there's no Scatter-Gather HW so that we get
* at the end of the transfer.
*/
val = axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS);
val &= ~AXI_DMAC_FLAG_CYCLIC;
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, val);
return NULL;
}
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
struct virt_dma_desc *vdesc;
struct axi_dmac_desc *desc;
struct axi_dmac_sg *sg;
unsigned int flags = 0;
unsigned int val;
desc = axi_dmac_get_next_desc(dmac, chan);
if (!desc)
return;
val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
if (val) /* Queue is full, wait for the next SOT IRQ */
return;
desc = chan->next_desc;
if (!desc) {
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;
list_move_tail(&vdesc->node, &chan->active_descs);
desc = to_axi_dmac_desc(vdesc);
chan->next_desc = desc;
}
sg = &desc->sg[desc->num_submitted];
/* Already queued in cyclic mode. Wait for it to finish */
@@ -291,10 +358,12 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
* call, enable hw cyclic mode to avoid unnecessary interrupts.
*/
if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
if (chan->hw_sg)
if (chan->hw_sg) {
desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
else if (desc->num_sgs == 1)
} else if (desc->num_sgs == 1) {
chan->next_desc = NULL;
flags |= AXI_DMAC_FLAG_CYCLIC;
}
}
if (chan->hw_partial_xfer)
@@ -312,12 +381,6 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
return list_first_entry_or_null(&chan->active_descs,
struct axi_dmac_desc, vdesc.node);
}
static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
struct axi_dmac_sg *sg)
{
@@ -398,6 +461,61 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
}
}
static bool axi_dmac_handle_cyclic_eot(struct axi_dmac_chan *chan,
struct axi_dmac_desc *active)
{
struct device *dev = chan_to_axi_dmac(chan)->dma_dev.dev;
struct virt_dma_desc *vdesc;
/* wrap around */
active->num_completed = 0;
if (active->cyclic_eot) {
/*
* It means an HW cyclic transfer was marked to stop. And we
* know we have something to schedule, so start the next
* transfer now the cyclic one is done.
*/
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
if (chan->hw_cyclic_hotfix) {
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
/*
* In older IP cores, ending a cyclic transfer by clearing
* the CYCLIC flag does not guarantee a graceful end.
* It can happen that some data (of the next frame) is
* already prefetched and will be wrongly visible in the
* next transfer. To workaround this, we need to reenable
* the core so everything is flushed. Newer cores handles
* this correctly and do not require this "hotfix". The
* SG IP also does not require this.
*/
dev_dbg(dev, "HW cyclic hotfix\n");
axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
}
return true;
}
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return false;
if (!(vdesc->tx.flags & DMA_PREP_LOAD_EOT)) {
dev_warn(dev, "Discarding non EOT transfer after cyclic\n");
list_del(&vdesc->node);
return false;
}
/* then let's end the cyclic transfer */
chan->next_desc = NULL;
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
return true;
}
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
unsigned int completed_transfers)
{
@@ -416,6 +534,7 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
if (chan->hw_sg) {
if (active->cyclic) {
vchan_cyclic_callback(&active->vdesc);
start_next = axi_dmac_handle_cyclic_eot(chan, active);
} else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
@@ -445,7 +564,8 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
if (active->num_completed == active->num_sgs ||
sg->partial_len) {
if (active->cyclic) {
active->num_completed = 0; /* wrap around */
/* keep start_next as is, if already true... */
start_next |= axi_dmac_handle_cyclic_eot(chan, active);
} else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
@@ -657,7 +777,12 @@ axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
vecs[i].len, dsg);
}
desc->cyclic = false;
desc->cyclic = flags & DMA_PREP_REPEAT;
if (desc->cyclic) {
/* Chain the last descriptor to the first, and remove its "last" flag */
desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
}
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
@@ -1053,6 +1178,9 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
chan->length_align_mask = chan->address_align_mask;
}
if (version < ADI_AXI_PCORE_VER(4, 6, 0) && !chan->hw_sg)
chan->hw_cyclic_hotfix = true;
return 0;
}

View File

@@ -31,29 +31,29 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/numa.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "dmaengine.h"
@@ -765,7 +765,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
mutex_lock(&dma_list_mutex);
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
/* Finds a DMA controller with matching device node */
if (np && device->dev->of_node && np != device->dev->of_node)
if (np && !device_match_of_node(device->dev, np))
continue;
chan = find_candidate(device, mask, fn, fn_param);
@@ -943,12 +943,14 @@ static void dmaenginem_release_channel(void *chan)
struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
{
struct dma_chan *chan = dma_request_chan(dev, name);
int ret = 0;
struct dma_chan *chan;
int ret;
if (!IS_ERR(chan))
ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
chan = dma_request_chan(dev, name);
if (IS_ERR(chan))
return chan;
ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
if (ret)
return ERR_PTR(ret);

View File

@@ -50,6 +50,7 @@
#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS BIT(1)
#define AXI_DMA_FLAG_USE_CFG2 BIT(2)
#define AXI_DMA_FLAG_ARG0_AS_CHAN BIT(3)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
@@ -342,8 +343,8 @@ static void axi_desc_put(struct axi_dma_desc *desc)
kfree(desc);
atomic_sub(descs_put, &chan->descs_allocated);
dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
axi_chan_name(chan), descs_put,
atomic_read(&chan->descs_allocated));
axi_chan_name(chan), descs_put,
atomic_read(&chan->descs_allocated));
}
static void vchan_desc_put(struct virt_dma_desc *vdesc)
@@ -353,7 +354,7 @@ static void vchan_desc_put(struct virt_dma_desc *vdesc)
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
struct dma_tx_state *txstate)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
struct virt_dma_desc *vdesc;
@@ -419,6 +420,7 @@ static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
iowrite32(val, chan->chip->apb_regs + offset);
}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
@@ -491,7 +493,7 @@ static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
desc = vd_to_axi_desc(vd);
dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
vd->tx.cookie);
vd->tx.cookie);
axi_chan_block_xfer_start(chan, desc);
}
@@ -592,8 +594,6 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
return;
}
/*
@@ -1162,7 +1162,7 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
axi_chan_irq_clear(chan, status);
dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
axi_chan_name(chan), i, status);
axi_chan_name(chan), i, status);
if (status & DWAXIDMAC_IRQ_ALL_ERR)
axi_chan_handle_err(chan, status);
@@ -1358,16 +1358,27 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
/*
 * OF translation callback for #dma-cells = <1>.
 *
 * args[0] is normally the hardware handshake number and any free channel
 * is returned. When the controller flag AXI_DMA_FLAG_ARG0_AS_CHAN is set
 * (use_handshake_as_channel_number), args[0] selects the channel directly
 * and doubles as its handshake number.
 *
 * Fix: the previous body was merge residue — it unconditionally called
 * dma_get_any_slave_channel() before the mode check and assigned
 * chan/hw_handshake_num twice.
 */
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	unsigned int handshake = dma_spec->args[0];
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan = NULL;
	struct dma_chan *dchan;

	if (dw->hdata->use_handshake_as_channel_number) {
		/* args[0] is a channel index: bounds-check it */
		if (handshake >= dw->hdata->nr_channels)
			return NULL;

		chan = &dw->chan[handshake];
		dchan = dma_get_slave_channel(&chan->vc.chan);
	} else {
		dchan = dma_get_any_slave_channel(&dw->dma);
	}

	if (!dchan)
		return NULL;

	if (!chan)
		chan = dchan_to_axi_dma_chan(dchan);

	chan->hw_handshake_num = handshake;

	return dchan;
}
@@ -1451,7 +1462,7 @@ static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
if (chip->irq[i] < 0)
return chip->irq[i];
ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip);
IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret < 0)
return ret;
}
@@ -1506,6 +1517,8 @@ static int dw_probe(struct platform_device *pdev)
return ret;
}
chip->dw->hdata->use_handshake_as_channel_number = !!(flags & AXI_DMA_FLAG_ARG0_AS_CHAN);
chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
@@ -1645,7 +1658,7 @@ static void dw_remove(struct platform_device *pdev)
of_dma_controller_free(chip->dev->of_node);
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
@@ -1661,6 +1674,9 @@ static const struct of_device_id dw_dma_of_id_table[] = {
}, {
.compatible = "intel,kmb-axi-dma",
.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
}, {
.compatible = "sophgo,cv1800b-axi-dma",
.data = (void *)AXI_DMA_FLAG_ARG0_AS_CHAN,
}, {
.compatible = "starfive,jh7110-axi-dma",
.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),

View File

@@ -34,6 +34,7 @@ struct dw_axi_dma_hcfg {
bool reg_map_8_channels;
bool restrict_axi_burst_len;
bool use_cfg2;
bool use_handshake_as_channel_number;
};
struct axi_dma_chan {

View File

@@ -223,6 +223,43 @@ static int dw_edma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
bool cfg_non_ll;
int non_ll = 0;
chan->non_ll = false;
if (chan->dw->chip->mf == EDMA_MF_HDMA_NATIVE) {
if (config->peripheral_config &&
config->peripheral_size != sizeof(int)) {
dev_err(dchan->device->dev,
"config param peripheral size mismatch\n");
return -EINVAL;
}
/*
* When there is no valid LLP base address available then the
* default DMA ops will use the non-LL mode.
*
* Cases where LL mode is enabled and client wants to use the
* non-LL mode then also client can do so via providing the
* peripheral_config param.
*/
cfg_non_ll = chan->dw->chip->cfg_non_ll;
if (config->peripheral_config) {
non_ll = *(int *)config->peripheral_config;
if (cfg_non_ll && !non_ll) {
dev_err(dchan->device->dev, "invalid configuration\n");
return -EINVAL;
}
}
if (cfg_non_ll || non_ll)
chan->non_ll = true;
} else if (config->peripheral_config) {
dev_err(dchan->device->dev,
"peripheral config param applicable only for HDMA\n");
return -EINVAL;
}
memcpy(&chan->config, config, sizeof(*config));
chan->configured = true;
@@ -358,6 +395,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
struct dw_edma_desc *desc;
u64 src_addr, dst_addr;
size_t fsz = 0;
u32 bursts_max;
u32 cnt = 0;
int i;
@@ -415,6 +453,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
/*
* For non-LL mode, only a single burst can be handled
* in a single chunk unlike LL mode where multiple bursts
* can be configured in a single chunk.
*/
bursts_max = chan->non_ll ? 1 : chan->ll_max;
desc = dw_edma_alloc_desc(chan);
if (unlikely(!desc))
goto err_alloc;
@@ -450,7 +495,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
break;
if (chunk->bursts_alloc == chan->ll_max) {
if (chunk->bursts_alloc == bursts_max) {
chunk = dw_edma_alloc_chunk(desc);
if (unlikely(!chunk))
goto err_alloc;
@@ -663,7 +708,96 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
chan->status = EDMA_ST_IDLE;
}
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
/* irq_chip .irq_ack hook: deassert the emulated doorbell at the eDMA core. */
static void dw_edma_emul_irq_ack(struct irq_data *d)
{
	struct dw_edma *dw = irq_data_get_irq_chip_data(d);

	dw_edma_core_ack_emulated_irq(dw);
}

/*
 * irq_chip implementation for interrupt-emulation doorbells.
 *
 * The emulated source has no mask/unmask mechanism. With handle_level_irq(),
 * the flow is therefore:
 * 1) .irq_ack() deasserts the source
 * 2) registered handlers (if any) are dispatched
 * Since deassertion is already done in .irq_ack(), handlers do not need to take
 * care of it, hence IRQCHIP_ONESHOT_SAFE.
 */
static struct irq_chip dw_edma_emul_irqchip = {
	.name = "dw-edma-emul",
	.irq_ack = dw_edma_emul_irq_ack,
	.flags = IRQCHIP_ONESHOT_SAFE | IRQCHIP_SKIP_SET_WAKE,
};
/*
 * Set up the Linux virtual IRQ that exposes interrupt-emulation doorbells.
 *
 * Returns 0 on success and also when the core does not support emulation
 * (in which case db_irq/db_offset stay at their "not available" values),
 * or a negative errno if the virq allocation fails.
 */
static int dw_edma_emul_irq_alloc(struct dw_edma *dw)
{
	struct dw_edma_chip *chip = dw->chip;
	int virq;

	/* Defaults: emulation doorbell not available */
	chip->db_irq = 0;
	chip->db_offset = ~0;

	/*
	 * Only meaningful when the core provides the deassert sequence
	 * for interrupt emulation.
	 */
	if (!dw->core->ack_emulated_irq)
		return 0;

	/*
	 * Allocate a single, requestable Linux virtual IRQ number.
	 * Use >= 1 so that 0 can remain a "not available" sentinel.
	 */
	virq = irq_alloc_desc(NUMA_NO_NODE);
	if (virq < 0)
		return virq;

	irq_set_chip_and_handler(virq, &dw_edma_emul_irqchip, handle_level_irq);
	irq_set_chip_data(virq, dw);
	irq_set_noprobe(virq);

	/* Publish the virq and the core-specific doorbell register offset */
	chip->db_irq = virq;
	chip->db_offset = dw_edma_core_db_offset(dw);

	return 0;
}
/*
 * Tear down the interrupt-emulation virq, if one was allocated, and reset
 * the chip fields back to their "not available" sentinels. Safe to call
 * when emulation was never set up.
 */
static void dw_edma_emul_irq_free(struct dw_edma *dw)
{
	struct dw_edma_chip *chip = dw->chip;

	if (!chip || chip->db_irq <= 0)
		return;

	irq_free_descs(chip->db_irq, 1);
	chip->db_irq = 0;
	chip->db_offset = ~0;
}
/*
 * Dispatch the interrupt-emulation doorbell, if one is configured.
 * Returns IRQ_HANDLED when the emulated virq was raised, IRQ_NONE when
 * emulation is not set up for this device.
 */
static inline irqreturn_t dw_edma_interrupt_emulated(void *data)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	int db_irq = dw->chip->db_irq;

	if (db_irq <= 0)
		return IRQ_NONE;

	/*
	 * Interrupt emulation may assert the IRQ line without updating the
	 * normal DONE/ABORT status bits. With a shared IRQ handler we
	 * cannot reliably detect such events by status registers alone, so
	 * always perform the core-specific deassert sequence.
	 */
	generic_handle_irq(db_irq);

	return IRQ_HANDLED;
}
static inline irqreturn_t dw_edma_interrupt_write_inner(int irq, void *data)
{
struct dw_edma_irq *dw_irq = data;
@@ -672,7 +806,7 @@ static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
dw_edma_abort_interrupt);
}
static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
static inline irqreturn_t dw_edma_interrupt_read_inner(int irq, void *data)
{
struct dw_edma_irq *dw_irq = data;
@@ -681,12 +815,33 @@ static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
dw_edma_abort_interrupt);
}
static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
ret |= dw_edma_interrupt_write(irq, data);
ret |= dw_edma_interrupt_read(irq, data);
ret |= dw_edma_interrupt_write_inner(irq, data);
ret |= dw_edma_interrupt_emulated(data);
return ret;
}
/*
 * Read-direction IRQ handler: service read-channel DONE/ABORT events and
 * any interrupt-emulation doorbell sharing the line.
 */
static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	irqreturn_t ret;

	ret = dw_edma_interrupt_read_inner(irq, data);
	ret |= dw_edma_interrupt_emulated(data);

	return ret;
}
/*
 * Combined IRQ handler used when a single line carries both directions:
 * service write and read channels, then the emulation doorbell.
 */
static inline irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	irqreturn_t ret;

	ret = dw_edma_interrupt_write_inner(irq, data);
	ret |= dw_edma_interrupt_read_inner(irq, data);
	ret |= dw_edma_interrupt_emulated(data);

	return ret;
}
@@ -977,6 +1132,11 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
return err;
/* Allocate a dedicated virtual IRQ for interrupt-emulation doorbells */
err = dw_edma_emul_irq_alloc(dw);
if (err)
dev_warn(dev, "Failed to allocate emulation IRQ: %d\n", err);
/* Setup write/read channels */
err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
@@ -992,6 +1152,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
dw_edma_emul_irq_free(dw);
return err;
}
@@ -1014,6 +1175,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
dw_edma_emul_irq_free(dw);
/* Deregister eDMA device */
dma_async_device_unregister(&dw->dma);

View File

@@ -86,6 +86,7 @@ struct dw_edma_chan {
u8 configured;
struct dma_slave_config config;
bool non_ll;
};
struct dw_edma_irq {
@@ -126,6 +127,8 @@ struct dw_edma_core_ops {
void (*start)(struct dw_edma_chunk *chunk, bool first);
void (*ch_config)(struct dw_edma_chan *chan);
void (*debugfs_on)(struct dw_edma *dw);
void (*ack_emulated_irq)(struct dw_edma *dw);
resource_size_t (*db_offset)(struct dw_edma *dw);
};
struct dw_edma_sg {
@@ -206,4 +209,19 @@ void dw_edma_core_debugfs_on(struct dw_edma *dw)
dw->core->debugfs_on(dw);
}
/*
 * Run the core-specific deassert sequence for an emulated interrupt.
 * Returns 0 on success, -EOPNOTSUPP when the core has no such hook.
 */
static inline int dw_edma_core_ack_emulated_irq(struct dw_edma *dw)
{
	if (dw->core->ack_emulated_irq) {
		dw->core->ack_emulated_irq(dw);
		return 0;
	}

	return -EOPNOTSUPP;
}
/*
 * Return the core-specific doorbell register offset used for interrupt
 * emulation. Callers must only use this when the core supports emulation
 * (see dw_edma_core_ack_emulated_irq()).
 */
static inline resource_size_t
dw_edma_core_db_offset(struct dw_edma *dw)
{
	return dw->core->db_offset(dw);
}
#endif /* _DW_EDMA_CORE_H */

View File

@@ -14,14 +14,35 @@
#include <linux/pci-epf.h>
#include <linux/msi.h>
#include <linux/bitfield.h>
#include <linux/sizes.h>
#include "dw-edma-core.h"
#define DW_PCIE_VSEC_DMA_ID 0x6
#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
/* Synopsys */
#define DW_PCIE_SYNOPSYS_VSEC_DMA_ID 0x6
#define DW_PCIE_SYNOPSYS_VSEC_DMA_BAR GENMASK(10, 8)
#define DW_PCIE_SYNOPSYS_VSEC_DMA_MAP GENMASK(2, 0)
#define DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH GENMASK(9, 0)
#define DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH GENMASK(25, 16)
/* AMD MDB (Xilinx) specific defines */
#define PCI_DEVICE_ID_XILINX_B054 0xb054
#define DW_PCIE_XILINX_MDB_VSEC_DMA_ID 0x6
#define DW_PCIE_XILINX_MDB_VSEC_ID 0x20
#define DW_PCIE_XILINX_MDB_VSEC_DMA_BAR GENMASK(10, 8)
#define DW_PCIE_XILINX_MDB_VSEC_DMA_MAP GENMASK(2, 0)
#define DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH GENMASK(9, 0)
#define DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH GENMASK(25, 16)
#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH 0xc
#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW 0x8
#define DW_PCIE_XILINX_MDB_INVALID_ADDR (~0ULL)
#define DW_PCIE_XILINX_MDB_LL_OFF_GAP 0x200000
#define DW_PCIE_XILINX_MDB_LL_SIZE 0x800
#define DW_PCIE_XILINX_MDB_DT_OFF_GAP 0x100000
#define DW_PCIE_XILINX_MDB_DT_SIZE 0x800
#define DW_BLOCK(a, b, c) \
{ \
@@ -50,6 +71,7 @@ struct dw_edma_pcie_data {
u8 irqs;
u16 wr_ch_cnt;
u16 rd_ch_cnt;
u64 devmem_phys_off;
};
static const struct dw_edma_pcie_data snps_edda_data = {
@@ -90,6 +112,64 @@ static const struct dw_edma_pcie_data snps_edda_data = {
.rd_ch_cnt = 2,
};
/*
 * Default description of the AMD MDB (Xilinx) HDMA instance. The
 * vendor-specific PCI capability, when present, may override the register
 * BAR/offset and trim the channel counts below these values.
 */
static const struct dw_edma_pcie_data xilinx_mdb_data = {
	/* MDB registers location */
	.rg.bar = BAR_0,
	.rg.off = SZ_4K, /* 4 Kbytes */
	.rg.sz = SZ_8K, /* 8 Kbytes */
	/* Other */
	.mf = EDMA_MF_HDMA_NATIVE,
	.irqs = 1,
	.wr_ch_cnt = 8,
	.rd_ch_cnt = 8,
};
static void dw_edma_set_chan_region_offset(struct dw_edma_pcie_data *pdata,
enum pci_barno bar, off_t start_off,
off_t ll_off_gap, size_t ll_size,
off_t dt_off_gap, size_t dt_size)
{
u16 wr_ch = pdata->wr_ch_cnt;
u16 rd_ch = pdata->rd_ch_cnt;
off_t off;
u16 i;
off = start_off;
/* Write channel LL region */
for (i = 0; i < wr_ch; i++) {
pdata->ll_wr[i].bar = bar;
pdata->ll_wr[i].off = off;
pdata->ll_wr[i].sz = ll_size;
off += ll_off_gap;
}
/* Read channel LL region */
for (i = 0; i < rd_ch; i++) {
pdata->ll_rd[i].bar = bar;
pdata->ll_rd[i].off = off;
pdata->ll_rd[i].sz = ll_size;
off += ll_off_gap;
}
/* Write channel data region */
for (i = 0; i < wr_ch; i++) {
pdata->dt_wr[i].bar = bar;
pdata->dt_wr[i].off = off;
pdata->dt_wr[i].sz = dt_size;
off += dt_off_gap;
}
/* Read channel data region */
for (i = 0; i < rd_ch; i++) {
pdata->dt_rd[i].bar = bar;
pdata->dt_rd[i].off = off;
pdata->dt_rd[i].sz = dt_size;
off += dt_off_gap;
}
}
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
{
return pci_irq_vector(to_pci_dev(dev), nr);
@@ -114,15 +194,15 @@ static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = {
.pci_address = dw_edma_pcie_address,
};
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
struct dw_edma_pcie_data *pdata)
static void dw_edma_pcie_get_synopsys_dma_data(struct pci_dev *pdev,
struct dw_edma_pcie_data *pdata)
{
u32 val, map;
u16 vsec;
u64 off;
vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
DW_PCIE_VSEC_DMA_ID);
DW_PCIE_SYNOPSYS_VSEC_DMA_ID);
if (!vsec)
return;
@@ -131,9 +211,9 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
PCI_VNDR_HEADER_LEN(val) != 0x18)
return;
pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
pci_dbg(pdev, "Detected Synopsys PCIe Vendor-Specific Extended Capability DMA\n");
pci_read_config_dword(pdev, vsec + 0x8, &val);
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
map = FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
map != EDMA_MF_HDMA_COMPAT &&
@@ -141,13 +221,13 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
return;
pdata->mf = map;
pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
pdata->rg.bar = FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_BAR, val);
pci_read_config_dword(pdev, vsec + 0xc, &val);
pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH, val));
pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH, val));
pci_read_config_dword(pdev, vsec + 0x14, &val);
off = val;
@@ -157,6 +237,73 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
pdata->rg.off = off;
}
/*
 * Parse the AMD MDB (Xilinx) vendor-specific PCIe capabilities to discover
 * the DMA register BAR/offset and channel counts, and — from a second VSEC —
 * the device-memory physical offset used for LL/data buffers. Returns
 * silently (leaving @pdata's defaults, with devmem_phys_off marked invalid)
 * when a capability is absent or malformed.
 */
static void dw_edma_pcie_get_xilinx_dma_data(struct pci_dev *pdev,
					     struct dw_edma_pcie_data *pdata)
{
	u32 val, map;
	u16 vsec;
	u64 off;

	/* Sentinel until a device-memory base is actually discovered */
	pdata->devmem_phys_off = DW_PCIE_XILINX_MDB_INVALID_ADDR;

	vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
					DW_PCIE_XILINX_MDB_VSEC_DMA_ID);
	if (!vsec)
		return;

	/* Only revision 0 with a 0x18-byte body is understood */
	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
	if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
	    PCI_VNDR_HEADER_LEN(val) != 0x18)
		return;

	pci_dbg(pdev, "Detected Xilinx PCIe Vendor-Specific Extended Capability DMA\n");

	/* MDB only supports the native HDMA register map */
	pci_read_config_dword(pdev, vsec + 0x8, &val);
	map = FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_MAP, val);
	if (map != EDMA_MF_HDMA_NATIVE)
		return;

	pdata->mf = map;
	pdata->rg.bar = FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_BAR, val);

	/* Clamp channel counts to what the capability advertises */
	pci_read_config_dword(pdev, vsec + 0xc, &val);
	pdata->wr_ch_cnt = min(pdata->wr_ch_cnt,
			       FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH, val));
	pdata->rd_ch_cnt = min(pdata->rd_ch_cnt,
			       FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH, val));

	/* Register-space offset: high dword at +0x14, low dword at +0x10 */
	pci_read_config_dword(pdev, vsec + 0x14, &val);
	off = val;
	pci_read_config_dword(pdev, vsec + 0x10, &val);
	off <<= 32;
	off |= val;
	pdata->rg.off = off;

	/* Optional second VSEC carrying the device-memory physical offset */
	vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
					DW_PCIE_XILINX_MDB_VSEC_ID);
	if (!vsec)
		return;

	pci_read_config_dword(pdev,
			      vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH,
			      &val);
	off = val;
	pci_read_config_dword(pdev,
			      vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW,
			      &val);
	off <<= 32;
	off |= val;
	pdata->devmem_phys_off = off;
}
static u64 dw_edma_get_phys_addr(struct pci_dev *pdev,
struct dw_edma_pcie_data *pdata,
enum pci_barno bar)
{
if (pdev->vendor == PCI_VENDOR_ID_XILINX)
return pdata->devmem_phys_off;
return pci_bus_address(pdev, bar);
}
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
@@ -165,6 +312,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
bool non_ll = false;
struct dw_edma_pcie_data *vsec_data __free(kfree) =
kmalloc_obj(*vsec_data);
@@ -184,7 +332,32 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
* Tries to find if exists a PCIe Vendor-Specific Extended Capability
* for the DMA, if one exists, then reconfigures it.
*/
dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
dw_edma_pcie_get_synopsys_dma_data(pdev, vsec_data);
if (pdev->vendor == PCI_VENDOR_ID_XILINX) {
dw_edma_pcie_get_xilinx_dma_data(pdev, vsec_data);
/*
* There is no valid address found for the LL memory
* space on the device side. In the absence of LL base
* address use the non-LL mode or simple mode supported by
* the HDMA IP.
*/
if (vsec_data->devmem_phys_off == DW_PCIE_XILINX_MDB_INVALID_ADDR)
non_ll = true;
/*
* Configure the channel LL and data blocks if number of
* channels enabled in VSEC capability are more than the
* channels configured in xilinx_mdb_data.
*/
if (!non_ll)
dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
DW_PCIE_XILINX_MDB_LL_OFF_GAP,
DW_PCIE_XILINX_MDB_LL_SIZE,
DW_PCIE_XILINX_MDB_DT_OFF_GAP,
DW_PCIE_XILINX_MDB_DT_SIZE);
}
/* Mapping PCI BAR regions */
mask = BIT(vsec_data->rg.bar);
@@ -231,6 +404,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
chip->cfg_non_ll = non_ll;
chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
@@ -239,7 +413,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
for (i = 0; i < chip->ll_wr_cnt && !non_ll; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
@@ -250,7 +424,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
ll_region->vaddr.io += ll_block->off;
ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
@@ -259,12 +434,13 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
dt_region->vaddr.io += dt_block->off;
dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
for (i = 0; i < chip->ll_rd_cnt && !non_ll; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
@@ -275,7 +451,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
ll_region->vaddr.io += ll_block->off;
ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
@@ -284,7 +461,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
dt_region->vaddr.io += dt_block->off;
dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -367,6 +545,8 @@ static void dw_edma_pcie_remove(struct pci_dev *pdev)
static const struct pci_device_id dw_edma_pcie_id_table[] = {
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
{ PCI_VDEVICE(XILINX, PCI_DEVICE_ID_XILINX_B054),
(kernel_ulong_t)&xilinx_mdb_data },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);

View File

@@ -509,6 +509,25 @@ static void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
dw_edma_v0_debugfs_on(dw);
}
/* v0 eDMA hook used by the interrupt-emulation doorbell irq_chip. */
static void dw_edma_v0_core_ack_emulated_irq(struct dw_edma *dw)
{
	/*
	 * Interrupt emulation may assert the IRQ without setting
	 * DONE/ABORT status bits. A zero write to INT_CLEAR deasserts the
	 * emulated IRQ, while being a no-op for real interrupts.
	 */
	SET_BOTH_32(dw, int_clear, 0);
}
/* Register offset a remote writer can poke to ring the emulation doorbell. */
static resource_size_t dw_edma_v0_core_db_offset(struct dw_edma *dw)
{
	/*
	 * rd_int_status is chosen arbitrarily, but wr_int_status would be
	 * equally suitable.
	 */
	return offsetof(struct dw_edma_v0_regs, rd_int_status);
}
static const struct dw_edma_core_ops dw_edma_v0_core = {
.off = dw_edma_v0_core_off,
.ch_count = dw_edma_v0_core_ch_count,
@@ -517,6 +536,8 @@ static const struct dw_edma_core_ops dw_edma_v0_core = {
.start = dw_edma_v0_core_start,
.ch_config = dw_edma_v0_core_ch_config,
.debugfs_on = dw_edma_v0_core_debugfs_on,
.ack_emulated_irq = dw_edma_v0_core_ack_emulated_irq,
.db_offset = dw_edma_v0_core_db_offset,
};
void dw_edma_v0_core_register(struct dw_edma *dw)

View File

@@ -225,7 +225,7 @@ static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
readl(chunk->ll_region.vaddr.io);
}
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
static void dw_hdma_v0_core_ll_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
@@ -263,6 +263,68 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
/*
 * Kick off a transfer in non-LL (simple) mode: program the single burst of
 * @chunk directly into the channel registers and ring the doorbell. In this
 * mode only one burst per chunk is handled (see dw_edma_device_transfer()).
 */
static void dw_hdma_v0_core_non_ll_start(struct dw_edma_chunk *chunk)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	struct dw_edma_burst *child;
	u32 val;

	/* Nothing to do for an empty chunk */
	child = list_first_entry_or_null(&chunk->burst->list,
					 struct dw_edma_burst, list);
	if (!child)
		return;

	SET_CH_32(dw, chan->dir, chan->id, ch_en, HDMA_V0_CH_EN);

	/* Source address */
	SET_CH_32(dw, chan->dir, chan->id, sar.lsb,
		  lower_32_bits(child->sar));
	SET_CH_32(dw, chan->dir, chan->id, sar.msb,
		  upper_32_bits(child->sar));

	/* Destination address */
	SET_CH_32(dw, chan->dir, chan->id, dar.lsb,
		  lower_32_bits(child->dar));
	SET_CH_32(dw, chan->dir, chan->id, dar.msb,
		  upper_32_bits(child->dar));

	/* Transfer size */
	SET_CH_32(dw, chan->dir, chan->id, transfer_size, child->sz);

	/* Interrupt setup: remote notifications only when not chip-local */
	val = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
	      HDMA_V0_STOP_INT_MASK |
	      HDMA_V0_ABORT_INT_MASK |
	      HDMA_V0_LOCAL_STOP_INT_EN |
	      HDMA_V0_LOCAL_ABORT_INT_EN;
	if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL)) {
		val |= HDMA_V0_REMOTE_STOP_INT_EN |
		       HDMA_V0_REMOTE_ABORT_INT_EN;
	}
	SET_CH_32(dw, chan->dir, chan->id, int_setup, val);

	/* Channel control setup: clear link-list mode for this transfer */
	val = GET_CH_32(dw, chan->dir, chan->id, control1);
	val &= ~HDMA_V0_LINKLIST_EN;
	SET_CH_32(dw, chan->dir, chan->id, control1, val);

	SET_CH_32(dw, chan->dir, chan->id, doorbell,
		  HDMA_V0_DOORBELL_START);
}
/* Start a chunk using whichever mode (LL or simple) the channel is in. */
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	if (chunk->chan->non_ll)
		dw_hdma_v0_core_non_ll_start(chunk);
	else
		dw_hdma_v0_core_ll_start(chunk, first);
}
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
@@ -283,6 +345,12 @@ static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
dw_hdma_v0_debugfs_on(dw);
}
/*
 * Doorbell offset for interrupt emulation is not yet known for HDMA;
 * return the invalid sentinel for now.
 */
static resource_size_t dw_hdma_v0_core_db_offset(struct dw_edma *dw)
{
	/* Implement once the correct offset is known. */
	return ~0;
}
static const struct dw_edma_core_ops dw_hdma_v0_core = {
.off = dw_hdma_v0_core_off,
.ch_count = dw_hdma_v0_core_ch_count,
@@ -291,6 +359,7 @@ static const struct dw_edma_core_ops dw_hdma_v0_core = {
.start = dw_hdma_v0_core_start,
.ch_config = dw_hdma_v0_core_ch_config,
.debugfs_on = dw_hdma_v0_core_debugfs_on,
.db_offset = dw_hdma_v0_core_db_offset,
};
void dw_hdma_v0_core_register(struct dw_edma *dw)

View File

@@ -12,6 +12,7 @@
#include <linux/dmaengine.h>
#define HDMA_V0_MAX_NR_CH 8
#define HDMA_V0_CH_EN BIT(0)
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)

View File

@@ -905,8 +905,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
fsl_chan->is_remote = false;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_disable_unprepare(fsl_chan->clk);
clk_disable_unprepare(fsl_chan->clk);
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)

View File

@@ -705,16 +705,14 @@ static int fsl_edma_probe(struct platform_device *pdev)
int ret, i;
drvdata = device_get_match_data(&pdev->dev);
if (!drvdata) {
dev_err(&pdev->dev, "unable to find driver data\n");
return -EINVAL;
}
if (!drvdata)
return dev_err_probe(&pdev->dev, -EINVAL,
"unable to find driver data\n");
ret = of_property_read_u32(np, "dma-channels", &chans);
if (ret) {
dev_err(&pdev->dev, "Can't get dma-channels.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't get dma-channels.\n");
fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
GFP_KERNEL);
@@ -738,10 +736,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
if (IS_ERR(fsl_edma->dmaclk))
return dev_err_probe(&pdev->dev,
PTR_ERR(fsl_edma->dmaclk),
"Missing DMA block clock.\n");
}
ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
@@ -765,11 +763,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
/* on error: disable all previously enabled clks */
return PTR_ERR(fsl_edma->muxclk[i]);
}
if (IS_ERR(fsl_edma->muxclk[i]))
return dev_err_probe(&pdev->dev,
PTR_ERR(fsl_edma->muxclk[i]),
"Missing DMAMUX block clock.\n");
}
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
@@ -878,22 +875,17 @@ static int fsl_edma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_edma);
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
return ret;
}
ret = dmaenginem_async_device_register(&fsl_edma->dma_dev);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't register Freescale eDMA engine.\n");
ret = of_dma_controller_register(np,
ret = devm_of_dma_controller_register(&pdev->dev, np,
drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't register Freescale eDMA of_dma.\n");
/* enable round robin arbitration */
if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
@@ -904,12 +896,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
/*
 * Platform-device teardown.
 *
 * Fix: probe now registers via dmaenginem_async_device_register() and
 * devm_of_dma_controller_register(), so calling of_dma_controller_free()
 * and dma_async_device_unregister() here would unregister twice once the
 * devm release actions run. Only the teardown without a managed
 * counterpart remains.
 */
static void fsl_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
}

View File

@@ -1127,22 +1127,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
ret = of_property_read_u32(np, "dma-channels", &chans);
if (ret) {
dev_err(&pdev->dev, "Can't get dma-channels.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't get dma-channels.\n");
ret = of_property_read_u32(np, "block-offset", &blk_off);
if (ret) {
dev_err(&pdev->dev, "Can't get block-offset.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't get block-offset.\n");
ret = of_property_read_u32(np, "block-number", &blk_num);
if (ret) {
dev_err(&pdev->dev, "Can't get block-number.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't get block-number.\n");
blk_num = min_t(int, blk_num, num_online_cpus());
@@ -1167,10 +1164,8 @@ static int fsl_qdma_probe(struct platform_device *pdev)
return -ENOMEM;
ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
if (ret) {
dev_err(&pdev->dev, "Can't get queues.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "Can't get queues.\n");
fsl_qdma->desc_allocated = 0;
fsl_qdma->n_chans = chans;
@@ -1231,28 +1226,24 @@ static int fsl_qdma_probe(struct platform_device *pdev)
fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
if (ret) {
dev_err(&pdev->dev, "dma_set_mask failure.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "dma_set_mask failure.\n");
platform_set_drvdata(pdev, fsl_qdma);
ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't Initialize the qDMA engine.\n");
ret = fsl_qdma_irq_init(pdev, fsl_qdma);
if (ret)
return ret;
ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Can't register NXP Layerscape qDMA engine.\n");
return 0;
}

View File

@@ -2265,34 +2265,24 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->regs))
return PTR_ERR(sdma->regs);
sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
sdma->clk_ipg = devm_clk_get_prepared(&pdev->dev, "ipg");
if (IS_ERR(sdma->clk_ipg))
return PTR_ERR(sdma->clk_ipg);
sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
sdma->clk_ahb = devm_clk_get_prepared(&pdev->dev, "ahb");
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
ret = clk_prepare(sdma->clk_ipg);
if (ret)
return ret;
ret = clk_prepare(sdma->clk_ahb);
if (ret)
goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
return ret;
sdma->irq = irq;
sdma->script_addrs = kzalloc_obj(*sdma->script_addrs);
if (!sdma->script_addrs) {
ret = -ENOMEM;
goto err_irq;
}
if (!sdma->script_addrs)
return -ENOMEM;
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -2333,11 +2323,11 @@ static int sdma_probe(struct platform_device *pdev)
ret = sdma_init(sdma);
if (ret)
goto err_init;
return ret;
ret = sdma_event_remap(sdma);
if (ret)
goto err_init;
return ret;
if (sdma->drvdata->script_addrs)
sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
@@ -2363,18 +2353,16 @@ static int sdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdma);
ret = dma_async_device_register(&sdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
goto err_init;
}
ret = dmaenginem_async_device_register(&sdma->dma_device);
if (ret)
return dev_err_probe(&pdev->dev, ret, "unable to register\n");
if (np) {
ret = of_dma_controller_register(np, sdma_xlate, sdma);
if (ret) {
dev_err(&pdev->dev, "failed to register controller\n");
goto err_register;
}
ret = devm_of_dma_controller_register(&pdev->dev, np,
sdma_xlate, sdma);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to register controller\n");
spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
ret = of_address_to_resource(spba_bus, 0, &spba_res);
@@ -2401,16 +2389,6 @@ static int sdma_probe(struct platform_device *pdev)
}
return 0;
err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
err_irq:
clk_unprepare(sdma->clk_ahb);
err_clk:
clk_unprepare(sdma->clk_ipg);
return ret;
}
static void sdma_remove(struct platform_device *pdev)
@@ -2419,10 +2397,6 @@ static void sdma_remove(struct platform_device *pdev)
int i;
devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
clk_unprepare(sdma->clk_ahb);
clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];

View File

@@ -140,12 +140,6 @@ struct ioatdma_chan {
int prev_intr_coalesce;
};
struct ioat_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct dma_chan *, char *);
ssize_t (*store)(struct dma_chan *, const char *, size_t);
};
/**
* struct ioat_sed_ent - wrapper around super extended hardware descriptor
* @hw: hardware SED
@@ -195,11 +189,8 @@ struct ioat_ring_ent {
struct ioat_sed_ent *sed;
};
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern struct kobj_type ioat_ktype;
extern const struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern struct kmem_cache *ioat_sed_cache;
@@ -402,7 +393,7 @@ void ioat_issue_pending(struct dma_chan *chan);
/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, const struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);

View File

@@ -14,6 +14,12 @@
#include "../dmaengine.h"
struct ioat_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct dma_chan *, char *);
ssize_t (*store)(struct dma_chan *, const char *, size_t);
};
static ssize_t cap_show(struct dma_chan *c, char *page)
{
struct dma_device *dma = c->device;
@@ -26,7 +32,7 @@ static ssize_t cap_show(struct dma_chan *c, char *page)
dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
static const struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
static ssize_t version_show(struct dma_chan *c, char *page)
{
@@ -36,15 +42,15 @@ static ssize_t version_show(struct dma_chan *c, char *page)
return sprintf(page, "%d.%d\n",
ioat_dma->version >> 4, ioat_dma->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
static const struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct ioat_sysfs_entry *entry;
const struct ioat_sysfs_entry *entry;
struct ioatdma_chan *ioat_chan;
entry = container_of(attr, struct ioat_sysfs_entry, attr);
entry = container_of_const(attr, struct ioat_sysfs_entry, attr);
ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
if (!entry->show)
@@ -56,10 +62,10 @@ static ssize_t
ioat_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t count)
{
struct ioat_sysfs_entry *entry;
const struct ioat_sysfs_entry *entry;
struct ioatdma_chan *ioat_chan;
entry = container_of(attr, struct ioat_sysfs_entry, attr);
entry = container_of_const(attr, struct ioat_sysfs_entry, attr);
ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
if (!entry->store)
@@ -67,12 +73,12 @@ const char *page, size_t count)
return entry->store(&ioat_chan->dma_chan, page, count);
}
const struct sysfs_ops ioat_sysfs_ops = {
static const struct sysfs_ops ioat_sysfs_ops = {
.show = ioat_attr_show,
.store = ioat_attr_store,
};
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
void ioat_kobject_add(struct ioatdma_device *ioat_dma, const struct kobj_type *type)
{
struct dma_device *dma = &ioat_dma->dma_dev;
struct dma_chan *c;
@@ -114,7 +120,7 @@ static ssize_t ring_size_show(struct dma_chan *c, char *page)
return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static const struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
@@ -123,7 +129,7 @@ static ssize_t ring_active_show(struct dma_chan *c, char *page)
/* ...taken outside the lock, no need to be precise */
return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static const struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
{
@@ -148,9 +154,9 @@ size_t count)
return count;
}
static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);
static const struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);
static struct attribute *ioat_attrs[] = {
static const struct attribute *const ioat_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
@@ -160,7 +166,7 @@ static struct attribute *ioat_attrs[] = {
};
ATTRIBUTE_GROUPS(ioat);
struct kobj_type ioat_ktype = {
const struct kobj_type ioat_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
.default_groups = ioat_groups,
};

View File

@@ -0,0 +1,41 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Loongson DMA controllers drivers
#
if MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST
config LOONGSON1_APB_DMA
tristate "Loongson1 APB DMA support"
depends on MACH_LOONGSON32 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
This selects support for the APB DMA controller in Loongson1 SoCs,
which is required by Loongson1 NAND and audio support.
config LOONGSON2_APB_DMA
tristate "Loongson2 APB DMA support"
depends on MACH_LOONGSON64 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the Loongson2 APB DMA controller driver. The
DMA controller has a single DMA channel which can be
configured for different peripherals such as audio, NAND,
SDIO etc. on the APB bus.
This DMA controller transfers data from memory to a peripheral FIFO.
It does not support memory-to-memory data transfer.
config LOONGSON2_APB_CMC_DMA
tristate "Loongson2 Chain Multi-Channel DMA support"
depends on MACH_LOONGSON64 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the Loongson Chain Multi-Channel DMA controller driver.
It is discovered on the Loongson-2K chip (Loongson-2K0300/Loongson-2K3000),
which has 4/8 channels internally, enabling bidirectional data transfer
between devices and memory.
endif

View File

@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o
obj-$(CONFIG_LOONGSON2_APB_CMC_DMA) += loongson2-apb-cmc-dma.o

View File

@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dmaengine.h"
#include "virt-dma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/* Loongson-1 DMA Control Register */
#define LS1X_DMA_CTRL 0x0

View File

@@ -0,0 +1,730 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Loongson-2 Chain Multi-Channel DMA Controller driver
*
* Copyright (C) 2024-2026 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
#define LOONGSON2_CMCDMA_ISR 0x0 /* DMA Interrupt Status Register */
#define LOONGSON2_CMCDMA_IFCR 0x4 /* DMA Interrupt Flag Clear Register */
#define LOONGSON2_CMCDMA_CCR 0x8 /* DMA Channel Configuration Register */
#define LOONGSON2_CMCDMA_CNDTR 0xc /* DMA Channel Transmit Count Register */
#define LOONGSON2_CMCDMA_CPAR 0x10 /* DMA Channel Peripheral Address Register */
#define LOONGSON2_CMCDMA_CMAR 0x14 /* DMA Channel Memory Address Register */
/* Bitfields of DMA interrupt status register */
#define LOONGSON2_CMCDMA_TCI BIT(1) /* Transfer Complete Interrupt */
#define LOONGSON2_CMCDMA_HTI BIT(2) /* Half Transfer Interrupt */
#define LOONGSON2_CMCDMA_TEI BIT(3) /* Transfer Error Interrupt */
#define LOONGSON2_CMCDMA_MASKI \
(LOONGSON2_CMCDMA_TCI | LOONGSON2_CMCDMA_HTI | LOONGSON2_CMCDMA_TEI)
/* Bitfields of DMA channel x Configuration Register */
#define LOONGSON2_CMCDMA_CCR_EN BIT(0) /* Stream Enable */
#define LOONGSON2_CMCDMA_CCR_TCIE BIT(1) /* Transfer Complete Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_HTIE BIT(2) /* Half Transfer Complete Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_TEIE BIT(3) /* Transfer Error Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_DIR BIT(4) /* Data Transfer Direction */
#define LOONGSON2_CMCDMA_CCR_CIRC BIT(5) /* Circular mode */
#define LOONGSON2_CMCDMA_CCR_PINC BIT(6) /* Peripheral increment mode */
#define LOONGSON2_CMCDMA_CCR_MINC BIT(7) /* Memory increment mode */
#define LOONGSON2_CMCDMA_CCR_PSIZE_MASK GENMASK(9, 8)
#define LOONGSON2_CMCDMA_CCR_MSIZE_MASK GENMASK(11, 10)
#define LOONGSON2_CMCDMA_CCR_PL_MASK GENMASK(13, 12)
#define LOONGSON2_CMCDMA_CCR_M2M BIT(14)
#define LOONGSON2_CMCDMA_CCR_CFG_MASK \
(LOONGSON2_CMCDMA_CCR_PINC | LOONGSON2_CMCDMA_CCR_MINC | LOONGSON2_CMCDMA_CCR_PL_MASK)
#define LOONGSON2_CMCDMA_CCR_IRQ_MASK \
(LOONGSON2_CMCDMA_CCR_TCIE | LOONGSON2_CMCDMA_CCR_HTIE | LOONGSON2_CMCDMA_CCR_TEIE)
#define LOONGSON2_CMCDMA_STREAM_MASK \
(LOONGSON2_CMCDMA_CCR_CFG_MASK | LOONGSON2_CMCDMA_CCR_IRQ_MASK)
#define LOONGSON2_CMCDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define LOONSON2_CMCDMA_MAX_DATA_ITEMS SZ_64K
/* In-memory image of one channel's HW registers (CCR/CNDTR/CPAR/CMAR). */
struct loongson2_cmc_dma_chan_reg {
	u32 ccr;	/* channel configuration */
	u32 cndtr;	/* transfer item count */
	u32 cpar;	/* peripheral address */
	u32 cmar;	/* memory address */
};

/* One scatter/gather segment: byte length plus its staged register values. */
struct loongson2_cmc_dma_sg_req {
	u32 len;
	struct loongson2_cmc_dma_chan_reg chan_reg;
};

/* A transfer descriptor: a virt-dma descriptor plus its segment list. */
struct loongson2_cmc_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;		/* true for prep_dma_cyclic descriptors */
	u32 num_sgs;		/* number of entries in sg_req[] */
	struct loongson2_cmc_dma_sg_req sg_req[] __counted_by(num_sgs);
};

/* Per-channel driver state. */
struct loongson2_cmc_dma_chan {
	struct virt_dma_chan vchan;
	struct dma_slave_config dma_sconfig;	/* last device_config() */
	struct loongson2_cmc_dma_desc *desc;	/* active descriptor, or NULL */
	u32 id;			/* channel index, selects register bank */
	u32 irq;		/* per-channel interrupt line */
	u32 next_sg;		/* index of the next segment to program */
	struct loongson2_cmc_dma_chan_reg chan_reg;	/* template from xlate/config */
};

/* Controller instance. */
struct loongson2_cmc_dma_dev {
	struct dma_device ddev;
	struct clk *dma_clk;	/* optional bus clock */
	void __iomem *base;
	u32 nr_channels;
	u32 chan_reg_offset;	/* stride between channel register banks */
	struct loongson2_cmc_dma_chan chan[] __counted_by(nr_channels);
};

/* Per-SoC parameters selected via OF/ACPI match data. */
struct loongson2_cmc_dma_config {
	u32 max_channels;
	u32 chan_reg_offset;
};

static const struct loongson2_cmc_dma_config ls2k0300_cmc_dma_config = {
	.max_channels = 8,
	.chan_reg_offset = 0x14,
};

static const struct loongson2_cmc_dma_config ls2k3000_cmc_dma_config = {
	.max_channels = 4,
	.chan_reg_offset = 0x18,
};
/* Map a channel back to its owning controller instance. */
static struct loongson2_cmc_dma_dev *lmdma_get_dev(struct loongson2_cmc_dma_chan *lchan)
{
	struct dma_device *ddev = lchan->vchan.chan.device;

	return container_of(ddev, struct loongson2_cmc_dma_dev, ddev);
}
/* Convert a generic dma_chan into the driver's channel structure. */
static struct loongson2_cmc_dma_chan *to_lmdma_chan(struct dma_chan *chan)
{
	struct virt_dma_chan *vchan = to_virt_chan(chan);

	return container_of(vchan, struct loongson2_cmc_dma_chan, vchan);
}
/* Convert a virt-dma descriptor into the driver's descriptor structure. */
static struct loongson2_cmc_dma_desc *to_lmdma_desc(struct virt_dma_desc *vdesc)
{
	struct loongson2_cmc_dma_desc *ldesc;

	ldesc = container_of(vdesc, struct loongson2_cmc_dma_desc, vdesc);
	return ldesc;
}
static struct device *chan2dev(struct loongson2_cmc_dma_chan *lchan)
{
return &lchan->vchan.chan.dev->device;
}
/* Read register @reg from channel @id's bank. */
static u32 loongson2_cmc_dma_read(struct loongson2_cmc_dma_dev *lddev, u32 reg, u32 id)
{
	void __iomem *addr = lddev->base + lddev->chan_reg_offset * id + reg;

	return readl(addr);
}
/* Write @val to register @reg in channel @id's bank. */
static void loongson2_cmc_dma_write(struct loongson2_cmc_dma_dev *lddev, u32 reg, u32 id, u32 val)
{
	void __iomem *addr = lddev->base + lddev->chan_reg_offset * id + reg;

	writel(val, addr);
}
/*
 * Encode a slave bus width as the log2 value the PSIZE/MSIZE fields
 * expect (1/2/4 bytes -> 0/1/2), or -EINVAL for unsupported widths.
 */
static int loongson2_cmc_dma_get_width(enum dma_slave_buswidth width)
{
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE ||
	    width == DMA_SLAVE_BUSWIDTH_2_BYTES ||
	    width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		return ffs(width) - 1;

	return -EINVAL;
}
static int loongson2_cmc_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *config)
{
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
memcpy(&lchan->dma_sconfig, config, sizeof(*config));
return 0;
}
/* Acknowledge interrupt @flags for this channel in the shared IFCR. */
static void loongson2_cmc_dma_irq_clear(struct loongson2_cmc_dma_chan *lchan, u32 flags)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);

	/* Each channel owns a 4-bit field in the global clear register. */
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_IFCR, 0,
				flags << (lchan->id * 4));
}
/* Disable the stream, mask its interrupts and drop any latched flags. */
static void loongson2_cmc_dma_stop(struct loongson2_cmc_dma_chan *lchan)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	u32 id = lchan->id;
	u32 ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, id);

	ccr &= ~(LOONGSON2_CMCDMA_CCR_EN | LOONGSON2_CMCDMA_CCR_IRQ_MASK);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, id, ccr);

	loongson2_cmc_dma_irq_clear(lchan, LOONGSON2_CMCDMA_MASKI);
}
/*
 * Abort all activity on the channel. The in-flight descriptor is handed
 * to vchan_terminate_vdesc() (freed later via vchan_synchronize()), the
 * HW stream is stopped, and all queued descriptors are collected for
 * freeing outside the lock.
 */
static int loongson2_cmc_dma_terminate_all(struct dma_chan *chan)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	LIST_HEAD(head);

	scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
		if (lchan->desc) {
			vchan_terminate_vdesc(&lchan->desc->vdesc);
			/* Quiesce the HW before dropping the reference. */
			loongson2_cmc_dma_stop(lchan);
			lchan->desc = NULL;
		}
		vchan_get_all_descriptors(&lchan->vchan, &head);
	}

	/* Descriptor callbacks may sleep-free; run outside the spinlock. */
	vchan_dma_desc_free_list(&lchan->vchan, &head);

	return 0;
}
static void loongson2_cmc_dma_synchronize(struct dma_chan *chan)
{
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
vchan_synchronize(&lchan->vchan);
}
/*
 * Program and start the next scatter/gather segment on the HW.
 * Must be called with the vchan lock held. If no descriptor is active,
 * the next issued one is dequeued; the segment index wraps so cyclic
 * descriptors restart from segment 0.
 */
static void loongson2_cmc_dma_start_transfer(struct loongson2_cmc_dma_chan *lchan)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct loongson2_cmc_dma_sg_req *sg_req;
	struct loongson2_cmc_dma_chan_reg *reg;
	struct virt_dma_desc *vdesc;

	/* Ensure the stream is halted before reprogramming it. */
	loongson2_cmc_dma_stop(lchan);

	if (!lchan->desc) {
		vdesc = vchan_next_desc(&lchan->vchan);
		if (!vdesc)
			return;		/* nothing issued */

		list_del(&vdesc->node);
		lchan->desc = to_lmdma_desc(vdesc);
		lchan->next_sg = 0;
	}

	/* Wrap around for cyclic descriptors. */
	if (lchan->next_sg == lchan->desc->num_sgs)
		lchan->next_sg = 0;

	sg_req = &lchan->desc->sg_req[lchan->next_sg];
	reg = &sg_req->chan_reg;

	/* Load the segment's configuration, item count and addresses. */
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, lchan->id, reg->ccr);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CNDTR, lchan->id, reg->cndtr);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CPAR, lchan->id, reg->cpar);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CMAR, lchan->id, reg->cmar);

	lchan->next_sg++;

	/* Start DMA */
	reg->ccr |= LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, lchan->id, reg->ccr);
}
/*
 * For a multi-segment cyclic descriptor: briefly disable the stream,
 * update only the memory address for the next segment, and re-enable.
 * Must be called with the vchan lock held.
 */
static void loongson2_cmc_dma_configure_next_sg(struct loongson2_cmc_dma_chan *lchan)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct loongson2_cmc_dma_sg_req *sg_req;
	u32 ccr, id = lchan->id;

	/* Wrap to the first period when the last one completed. */
	if (lchan->next_sg == lchan->desc->num_sgs)
		lchan->next_sg = 0;

	/* Stop to update mem addr */
	ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, id);
	ccr &= ~LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, id, ccr);

	sg_req = &lchan->desc->sg_req[lchan->next_sg];
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CMAR, id, sg_req->chan_reg.cmar);

	/* Start transition */
	ccr |= LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, id, ccr);
}
/*
 * Transfer-complete handling, called from the IRQ handler with the
 * vchan lock held. Cyclic descriptors fire the period callback and
 * advance to the next segment; non-cyclic ones complete when the last
 * segment is done and then the next queued descriptor is started.
 */
static void loongson2_cmc_dma_handle_chan_done(struct loongson2_cmc_dma_chan *lchan)
{
	if (!lchan->desc)
		return;		/* spurious: channel was terminated */

	if (lchan->desc->cyclic) {
		vchan_cyclic_callback(&lchan->desc->vdesc);
		/* LOONGSON2_CMCDMA_CCR_CIRC mode don't need update register */
		if (lchan->desc->num_sgs == 1)
			return;

		loongson2_cmc_dma_configure_next_sg(lchan);
		lchan->next_sg++;
	} else {
		if (lchan->next_sg == lchan->desc->num_sgs) {
			vchan_cookie_complete(&lchan->desc->vdesc);
			lchan->desc = NULL;
		}
		/* Program the remaining segment, or dequeue the next desc. */
		loongson2_cmc_dma_start_transfer(lchan);
	}
}
/*
 * Per-channel interrupt handler.
 *
 * The IRQ line is requested with IRQF_SHARED (see probe), so the handler
 * must report IRQ_NONE when this channel has no pending status bits;
 * unconditionally returning IRQ_HANDLED would defeat the kernel's
 * spurious-interrupt detection for the shared line. That is the one
 * behavioral fix here; the rest matches the original flow: ack the
 * latched flags, run completion handling on TCI, ignore HTI, and log
 * transfer errors (TEI).
 */
static irqreturn_t loongson2_cmc_dma_chan_irq(int irq, void *devid)
{
	struct loongson2_cmc_dma_chan *lchan = devid;
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct device *dev = chan2dev(lchan);
	u32 ists, status, ccr;

	scoped_guard(spinlock, &lchan->vchan.lock) {
		ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, lchan->id);
		ists = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_ISR, 0);
		status = (ists >> (4 * lchan->id)) & LOONGSON2_CMCDMA_MASKI;

		/* Shared line: nothing pending for this channel. */
		if (!status)
			return IRQ_NONE;

		loongson2_cmc_dma_irq_clear(lchan, status);

		if (status & LOONGSON2_CMCDMA_TCI) {
			loongson2_cmc_dma_handle_chan_done(lchan);
			status &= ~LOONGSON2_CMCDMA_TCI;
		}

		/* Half-transfer events are acknowledged but otherwise unused. */
		if (status & LOONGSON2_CMCDMA_HTI)
			status &= ~LOONGSON2_CMCDMA_HTI;

		if (status & LOONGSON2_CMCDMA_TEI) {
			dev_err(dev, "DMA Transform Error.\n");
			if (!(ccr & LOONGSON2_CMCDMA_CCR_EN))
				dev_err(dev, "Channel disabled by HW.\n");
		}
	}

	return IRQ_HANDLED;
}
/* Kick the HW if new work was issued and the channel is currently idle. */
static void loongson2_cmc_dma_issue_pending(struct dma_chan *chan)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);

	guard(spinlock_irqsave)(&lchan->vchan.lock);

	if (!vchan_issue_pending(&lchan->vchan) || lchan->desc)
		return;

	dev_dbg(chan2dev(lchan), "vchan %pK: issued\n", &lchan->vchan);
	loongson2_cmc_dma_start_transfer(lchan);
}
/*
 * Translate the cached slave config and transfer direction into the
 * channel's CCR template and peripheral address.
 *
 * Returns 0 on success or a negative errno for unsupported widths or
 * directions. @buf_len is currently unused by this implementation.
 *
 * NOTE(review): DMA_MEM_TO_DEV sets only the DIR bit while DMA_DEV_TO_MEM
 * sets MINC — presumably memory increment for mem-to-dev comes from the
 * xlate-provided CCR bits; confirm against the HW manual.
 */
static int loongson2_cmc_dma_set_xfer_param(struct loongson2_cmc_dma_chan *lchan,
					    enum dma_transfer_direction direction,
					    enum dma_slave_buswidth *buswidth, u32 buf_len)
{
	struct dma_slave_config sconfig = lchan->dma_sconfig;
	struct device *dev = chan2dev(lchan);
	int dev_width;
	u32 ccr;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dev_width = loongson2_cmc_dma_get_width(sconfig.dst_addr_width);
		if (dev_width < 0) {
			dev_err(dev, "DMA_MEM_TO_DEV bus width not supported\n");
			return dev_width;
		}

		/* Destination peripheral FIFO address. */
		lchan->chan_reg.cpar = sconfig.dst_addr;
		ccr = LOONGSON2_CMCDMA_CCR_DIR;
		*buswidth = sconfig.dst_addr_width;
		break;
	case DMA_DEV_TO_MEM:
		dev_width = loongson2_cmc_dma_get_width(sconfig.src_addr_width);
		if (dev_width < 0) {
			dev_err(dev, "DMA_DEV_TO_MEM bus width not supported\n");
			return dev_width;
		}

		/* Source peripheral FIFO address; increment the memory side. */
		lchan->chan_reg.cpar = sconfig.src_addr;
		ccr = LOONGSON2_CMCDMA_CCR_MINC;
		*buswidth = sconfig.src_addr_width;
		break;
	default:
		return -EINVAL;
	}

	/* Same encoded width for peripheral and memory accesses. */
	ccr |= FIELD_PREP(LOONGSON2_CMCDMA_CCR_PSIZE_MASK, dev_width) |
	       FIELD_PREP(LOONGSON2_CMCDMA_CCR_MSIZE_MASK, dev_width);

	/* Set DMA control register */
	lchan->chan_reg.ccr &= ~(LOONGSON2_CMCDMA_CCR_PSIZE_MASK | LOONGSON2_CMCDMA_CCR_MSIZE_MASK);
	lchan->chan_reg.ccr |= ccr;

	return 0;
}
/*
 * Prepare a slave scatter/gather transfer: one sg_req per scatterlist
 * entry, each carrying a snapshot of the channel register template.
 *
 * Fix: the original leaked @desc when loongson2_cmc_dma_set_xfer_param()
 * failed inside the loop (it freed it only on the item-count error path);
 * every error exit after the allocation now frees the descriptor.
 */
static struct dma_async_tx_descriptor *
loongson2_cmc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 sg_len,
				enum dma_transfer_direction direction,
				unsigned long flags, void *context)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	struct loongson2_cmc_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	struct scatterlist *sg;
	u32 num_items, i;
	int ret;

	desc = kzalloc_flex(*desc, sg_req, sg_len, GFP_NOWAIT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	for_each_sg(sgl, sg, sg_len, i) {
		ret = loongson2_cmc_dma_set_xfer_param(lchan, direction, &buswidth, sg_dma_len(sg));
		if (ret) {
			kfree(desc);	/* was leaked on this path */
			return ERR_PTR(ret);
		}

		/* Item count is in bus-width units and must fit CNDTR. */
		num_items = DIV_ROUND_UP(sg_dma_len(sg), buswidth);
		if (num_items >= LOONSON2_CMCDMA_MAX_DATA_ITEMS) {
			dev_err(chan2dev(lchan), "Number of items not supported\n");
			kfree(desc);
			return ERR_PTR(-EINVAL);
		}

		desc->sg_req[i].len = sg_dma_len(sg);
		desc->sg_req[i].chan_reg.ccr = lchan->chan_reg.ccr;
		desc->sg_req[i].chan_reg.cpar = lchan->chan_reg.cpar;
		desc->sg_req[i].chan_reg.cmar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.cndtr = num_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
/*
 * Prepare a cyclic transfer of @buf_len bytes split into periods of
 * @period_len. A single-period buffer uses the HW CIRC mode; otherwise
 * one sg_req per period is built and the IRQ handler re-arms CMAR on
 * every transfer-complete.
 */
static struct dma_async_tx_descriptor *
loongson2_cmc_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
				  size_t period_len, enum dma_transfer_direction direction,
				  unsigned long flags)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	struct loongson2_cmc_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, num_items, i;
	int ret;

	/* The buffer must divide evenly into periods. */
	if (unlikely(buf_len % period_len))
		return ERR_PTR(-EINVAL);

	ret = loongson2_cmc_dma_set_xfer_param(lchan, direction, &buswidth, period_len);
	if (ret)
		return ERR_PTR(ret);

	/* Item count is in bus-width units and must fit CNDTR. */
	num_items = DIV_ROUND_UP(period_len, buswidth);
	if (num_items >= LOONSON2_CMCDMA_MAX_DATA_ITEMS) {
		dev_err(chan2dev(lchan), "Number of items not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Enable Circular mode */
	if (buf_len == period_len)
		lchan->chan_reg.ccr |= LOONGSON2_CMCDMA_CCR_CIRC;

	num_periods = DIV_ROUND_UP(buf_len, period_len);

	desc = kzalloc_flex(*desc, sg_req, num_periods, GFP_NOWAIT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	/* One segment per period, all sharing the same CCR/CPAR template. */
	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;
		desc->sg_req[i].chan_reg.ccr = lchan->chan_reg.ccr;
		desc->sg_req[i].chan_reg.cpar = lchan->chan_reg.cpar;
		desc->sg_req[i].chan_reg.cmar = buf_addr;
		desc->sg_req[i].chan_reg.cndtr = num_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
/*
 * Compute the residue (bytes not yet transferred) for @desc: the HW's
 * remaining item count for the current segment, converted to bytes,
 * plus the full length of every segment from @next_sg onward.
 *
 * Fix: the cyclic check used to read lchan->desc->cyclic instead of
 * desc->cyclic. When tx_status() queries a descriptor still sitting in
 * the queue (the "else if (vdesc)" path, next_sg == 0), lchan->desc may
 * be NULL — dereferencing it would oops — and even when non-NULL it is
 * the wrong descriptor. Use the descriptor actually being queried.
 */
static size_t loongson2_cmc_dma_desc_residue(struct loongson2_cmc_dma_chan *lchan,
					     struct loongson2_cmc_dma_desc *desc, u32 next_sg)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	u32 residue, width, ndtr, ccr, i;

	/* Remaining items in the running segment, scaled to bytes. */
	ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, lchan->id);
	width = FIELD_GET(LOONGSON2_CMCDMA_CCR_PSIZE_MASK, ccr);
	ndtr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CNDTR, lchan->id);
	residue = ndtr << width;

	if (desc->cyclic && next_sg == 0)
		return residue;

	/* Add the segments that have not been started yet. */
	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;

	return residue;
}
/*
 * device_tx_status() callback: report cookie completion state and, for
 * in-flight or still-queued descriptors, fill in the byte residue.
 */
static enum dma_status loongson2_cmc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
						   struct dma_tx_state *state)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;

	status = dma_cookie_status(chan, cookie, state);
	/* Complete, or caller doesn't want the residue: nothing more to do. */
	if (status == DMA_COMPLETE || !state)
		return status;

	scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
		vdesc = vchan_find_desc(&lchan->vchan, cookie);
		if (lchan->desc && cookie == lchan->desc->vdesc.tx.cookie)
			/* Active descriptor: residue from the current segment on. */
			state->residue = loongson2_cmc_dma_desc_residue(lchan, lchan->desc,
									lchan->next_sg);
		else if (vdesc)
			/* Still queued: nothing transferred yet. */
			state->residue = loongson2_cmc_dma_desc_residue(lchan,
									to_lmdma_desc(vdesc), 0);
	}

	return status;
}
static void loongson2_cmc_dma_free_chan_resources(struct dma_chan *chan)
{
vchan_free_chan_resources(to_virt_chan(chan));
}
/* virt-dma desc_free hook: descriptors are plain kzalloc_flex allocations. */
static void loongson2_cmc_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct loongson2_cmc_dma_desc *ldesc = to_lmdma_desc(vdesc);

	kfree(ldesc);
}
/* ACPI xlate filter: seed the channel's CCR template from the dma-spec. */
static bool loongson2_cmc_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);

	memset(&lchan->chan_reg, 0, sizeof(lchan->chan_reg));
	lchan->chan_reg.ccr = dma_spec->chan_id & LOONGSON2_CMCDMA_STREAM_MASK;

	return true;
}
/*
 * Register the controller with the ACPI DMA helpers when the device is
 * described by ACPI; a no-op (returns 0) otherwise. The filter-info
 * object is devm-managed, matching devm_acpi_dma_controller_register().
 *
 * Cleanup: the original called dma_cap_zero() on a devm_kzalloc'ed (and
 * therefore already zeroed) field that the struct assignment on the next
 * line fully overwrites anyway — dropped as redundant.
 */
static int loongson2_cmc_dma_acpi_controller_register(struct loongson2_cmc_dma_dev *lddev)
{
	struct device *dev = lddev->ddev.dev;
	struct acpi_dma_filter_info *info;

	if (!is_acpi_node(dev_fwnode(dev)))
		return 0;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dma_cap = lddev->ddev.cap_mask;
	info->filter_fn = loongson2_cmc_dma_acpi_filter;

	return devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
}
/*
 * OF xlate: #dma-cells = 2 — args[0] is the channel index, args[1]
 * carries the CCR stream-configuration bits. Claims the channel and
 * seeds its register template.
 */
static struct dma_chan *loongson2_cmc_dma_of_xlate(struct of_phandle_args *dma_spec,
						   struct of_dma *ofdma)
{
	struct loongson2_cmc_dma_dev *lddev = ofdma->of_dma_data;
	struct device *dev = lddev->ddev.dev;
	struct loongson2_cmc_dma_chan *lchan;
	struct dma_chan *chan;

	if (dma_spec->args_count < 2)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[0] >= lddev->nr_channels) {
		dev_err(dev, "Invalid channel id.\n");
		return ERR_PTR(-EINVAL);
	}

	lchan = &lddev->chan[dma_spec->args[0]];
	chan = dma_get_slave_channel(&lchan->vchan.chan);
	if (!chan) {
		dev_err(dev, "No more channels available.\n");
		return ERR_PTR(-EINVAL);
	}

	/* Reset the template; only the masked CCR bits come from the DT. */
	memset(&lchan->chan_reg, 0, sizeof(struct loongson2_cmc_dma_chan_reg));
	lchan->chan_reg.ccr = dma_spec->args[1] & LOONGSON2_CMCDMA_STREAM_MASK;

	return chan;
}
/* Register with the OF DMA helpers when device-tree described; else no-op. */
static int loongson2_cmc_dma_of_controller_register(struct loongson2_cmc_dma_dev *lddev)
{
	struct device *dev = lddev->ddev.dev;

	if (is_of_node(dev_fwnode(dev)))
		return of_dma_controller_register(dev->of_node,
						  loongson2_cmc_dma_of_xlate, lddev);

	return 0;
}
static int loongson2_cmc_dma_probe(struct platform_device *pdev)
{
const struct loongson2_cmc_dma_config *config;
struct loongson2_cmc_dma_chan *lchan;
struct loongson2_cmc_dma_dev *lddev;
struct device *dev = &pdev->dev;
struct dma_device *ddev;
u32 nr_chans, i;
int ret;
config = (const struct loongson2_cmc_dma_config *)device_get_match_data(dev);
if (!config)
return -EINVAL;
ret = device_property_read_u32(dev, "dma-channels", &nr_chans);
if (ret || nr_chans > config->max_channels) {
dev_err(dev, "missing or invalid dma-channels property\n");
nr_chans = config->max_channels;
}
lddev = devm_kzalloc(dev, struct_size(lddev, chan, nr_chans), GFP_KERNEL);
if (!lddev)
return -ENOMEM;
lddev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lddev->base))
return PTR_ERR(lddev->base);
platform_set_drvdata(pdev, lddev);
lddev->nr_channels = nr_chans;
lddev->chan_reg_offset = config->chan_reg_offset;
lddev->dma_clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(lddev->dma_clk))
return dev_err_probe(dev, PTR_ERR(lddev->dma_clk), "Failed to get dma clock\n");
ddev = &lddev->ddev;
ddev->dev = dev;
dma_cap_zero(ddev->cap_mask);
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
ddev->device_free_chan_resources = loongson2_cmc_dma_free_chan_resources;
ddev->device_config = loongson2_cmc_dma_slave_config;
ddev->device_prep_slave_sg = loongson2_cmc_dma_prep_slave_sg;
ddev->device_prep_dma_cyclic = loongson2_cmc_dma_prep_dma_cyclic;
ddev->device_issue_pending = loongson2_cmc_dma_issue_pending;
ddev->device_synchronize = loongson2_cmc_dma_synchronize;
ddev->device_tx_status = loongson2_cmc_dma_tx_status;
ddev->device_terminate_all = loongson2_cmc_dma_terminate_all;
ddev->max_sg_burst = LOONSON2_CMCDMA_MAX_DATA_ITEMS;
ddev->src_addr_widths = LOONGSON2_CMCDMA_BUSWIDTHS;
ddev->dst_addr_widths = LOONGSON2_CMCDMA_BUSWIDTHS;
ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
INIT_LIST_HEAD(&ddev->channels);
for (i = 0; i < nr_chans; i++) {
lchan = &lddev->chan[i];
lchan->id = i;
lchan->vchan.desc_free = loongson2_cmc_dma_desc_free;
vchan_init(&lchan->vchan, ddev);
}
ret = dmaenginem_async_device_register(ddev);
if (ret)
return dev_err_probe(dev, ret, "Failed to register DMA engine device.\n");
for (i = 0; i < nr_chans; i++) {
lchan = &lddev->chan[i];
lchan->irq = platform_get_irq(pdev, i);
if (lchan->irq < 0)
return lchan->irq;
ret = devm_request_irq(dev, lchan->irq, loongson2_cmc_dma_chan_irq, IRQF_SHARED,
dev_name(chan2dev(lchan)), lchan);
if (ret)
return ret;
}
ret = loongson2_cmc_dma_acpi_controller_register(lddev);
if (ret)
return dev_err_probe(dev, ret, "Failed to register dma controller with ACPI.\n");
ret = loongson2_cmc_dma_of_controller_register(lddev);
if (ret)
return dev_err_probe(dev, ret, "Failed to register dma controller with FDT.\n");
dev_info(dev, "Loongson-2 Multi-Channel DMA Controller registered successfully.\n");
return 0;
}
/* Undo the OF registration; everything else in probe is devm-managed. */
static void loongson2_cmc_dma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	of_dma_controller_free(np);
}
/* OF match table: per-SoC channel count and register-bank stride. */
static const struct of_device_id loongson2_cmc_dma_of_match[] = {
	{ .compatible = "loongson,ls2k0300-dma", .data = &ls2k0300_cmc_dma_config },
	{ .compatible = "loongson,ls2k3000-dma", .data = &ls2k3000_cmc_dma_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson2_cmc_dma_of_match);

/* ACPI match table (Loongson-2K3000 firmware description). */
static const struct acpi_device_id loongson2_cmc_dma_acpi_match[] = {
	{ "LOON0014", .driver_data = (kernel_ulong_t)&ls2k3000_cmc_dma_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, loongson2_cmc_dma_acpi_match);

static struct platform_driver loongson2_cmc_dma_driver = {
	.driver = {
		.name = "loongson2-apb-cmc-dma",
		.of_match_table = loongson2_cmc_dma_of_match,
		.acpi_match_table = loongson2_cmc_dma_acpi_match,
	},
	.probe = loongson2_cmc_dma_probe,
	.remove = loongson2_cmc_dma_remove,
};
module_platform_driver(loongson2_cmc_dma_driver);

MODULE_DESCRIPTION("Loongson-2 Chain Multi-Channel DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");

View File

@@ -17,8 +17,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dmaengine.h"
#include "virt-dma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/* Global Configuration Register */
#define LDMA_ORDER_ERG 0x0
@@ -461,12 +461,11 @@ static int ls2x_dma_slave_config(struct dma_chan *chan,
static void ls2x_dma_issue_pending(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
guard(spinlock_irqsave)(&lchan->vchan.lock);
if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
ls2x_dma_start_transfer(lchan);
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
}
/*
@@ -478,19 +477,18 @@ static void ls2x_dma_issue_pending(struct dma_chan *chan)
static int ls2x_dma_terminate_all(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&lchan->vchan.lock, flags);
/* Setting stop cmd */
ls2x_dma_write_cmd(lchan, LDMA_STOP);
if (lchan->desc) {
vchan_terminate_vdesc(&lchan->desc->vdesc);
lchan->desc = NULL;
}
scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
/* Setting stop cmd */
ls2x_dma_write_cmd(lchan, LDMA_STOP);
if (lchan->desc) {
vchan_terminate_vdesc(&lchan->desc->vdesc);
lchan->desc = NULL;
}
vchan_get_all_descriptors(&lchan->vchan, &head);
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
vchan_get_all_descriptors(&lchan->vchan, &head);
}
vchan_dma_desc_free_list(&lchan->vchan, &head);
return 0;
@@ -511,14 +509,13 @@ static void ls2x_dma_synchronize(struct dma_chan *chan)
static int ls2x_dma_pause(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
guard(spinlock_irqsave)(&lchan->vchan.lock);
if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
ls2x_dma_write_cmd(lchan, LDMA_STOP);
lchan->desc->status = DMA_PAUSED;
}
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
return 0;
}
@@ -526,14 +523,13 @@ static int ls2x_dma_pause(struct dma_chan *chan)
static int ls2x_dma_resume(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
guard(spinlock_irqsave)(&lchan->vchan.lock);
if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
lchan->desc->status = DMA_IN_PROGRESS;
ls2x_dma_write_cmd(lchan, LDMA_START);
}
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
return 0;
}
@@ -550,22 +546,22 @@ static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
struct ls2x_dma_chan *lchan = dev_id;
struct ls2x_dma_desc *desc;
spin_lock(&lchan->vchan.lock);
desc = lchan->desc;
if (desc) {
if (desc->cyclic) {
vchan_cyclic_callback(&desc->vdesc);
} else {
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
ls2x_dma_start_transfer(lchan);
}
scoped_guard(spinlock, &lchan->vchan.lock) {
desc = lchan->desc;
if (desc) {
if (desc->cyclic) {
vchan_cyclic_callback(&desc->vdesc);
} else {
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
ls2x_dma_start_transfer(lchan);
}
/* ls2x_dma_start_transfer() updates lchan->desc */
if (!lchan->desc)
ls2x_dma_write_cmd(lchan, LDMA_STOP);
/* ls2x_dma_start_transfer() updates lchan->desc */
if (!lchan->desc)
ls2x_dma_write_cmd(lchan, LDMA_STOP);
}
}
spin_unlock(&lchan->vchan.lock);
return IRQ_HANDLED;
}
@@ -616,17 +612,13 @@ static int ls2x_dma_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->regs),
"devm_platform_ioremap_resource failed.\n");
priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
priv->dma_clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->dma_clk))
return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
ret = clk_prepare_enable(priv->dma_clk);
if (ret)
return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "Couldn't start the clock.\n");
ret = ls2x_dma_chan_init(pdev, priv);
if (ret)
goto disable_clk;
return ret;
ddev = &priv->ddev;
ddev->dev = dev;
@@ -650,25 +642,18 @@ static int ls2x_dma_probe(struct platform_device *pdev)
ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
ret = dma_async_device_register(&priv->ddev);
ret = dmaenginem_async_device_register(&priv->ddev);
if (ret < 0)
goto disable_clk;
return dev_err_probe(dev, ret, "Failed to register DMA engine device.\n");
ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
if (ret < 0)
goto unregister_dmac;
return dev_err_probe(dev, ret, "Failed to register dma controller.\n");
platform_set_drvdata(pdev, priv);
dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
return 0;
unregister_dmac:
dma_async_device_unregister(&priv->ddev);
disable_clk:
clk_disable_unprepare(priv->dma_clk);
return ret;
}
/*
@@ -677,11 +662,7 @@ disable_clk:
*/
static void ls2x_dma_remove(struct platform_device *pdev)
{
struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&priv->ddev);
clk_disable_unprepare(priv->dma_clk);
}
static const struct of_device_id ls2x_dma_of_match_table[] = {

View File

@@ -744,20 +744,19 @@ static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
struct device *dev = &pdev->dev;
struct mxs_dma_engine *mxs_dma;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
mxs_dma = devm_kzalloc(dev, sizeof(*mxs_dma), GFP_KERNEL);
if (!mxs_dma)
return -ENOMEM;
ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
if (ret) {
dev_err(&pdev->dev, "failed to read dma-channels\n");
return ret;
}
if (ret)
return dev_err_probe(dev, ret, "failed to read dma-channels\n");
dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
dma_type = (struct mxs_dma_type *)of_device_get_match_data(dev);
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
@@ -765,7 +764,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);
mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
mxs_dma->clk = devm_clk_get(dev, NULL);
if (IS_ERR(mxs_dma->clk))
return PTR_ERR(mxs_dma->clk);
@@ -795,10 +794,10 @@ static int mxs_dma_probe(struct platform_device *pdev)
return ret;
mxs_dma->pdev = pdev;
mxs_dma->dma_device.dev = &pdev->dev;
mxs_dma->dma_device.dev = dev;
/* mxs_dma gets 65535 bytes maximum sg size */
dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
dma_set_max_seg_size(dev, MAX_XFER_BYTES);
mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
@@ -815,18 +814,15 @@ static int mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
}
if (ret)
return dev_err_probe(dev, ret, "unable to register\n");
ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
}
ret = devm_of_dma_controller_register(dev, np, mxs_dma_xlate, mxs_dma);
if (ret)
return dev_err_probe(dev, ret,
"failed to register controller\n");
dev_info(mxs_dma->dma_device.dev, "initialized\n");
dev_info(dev, "initialized\n");
return 0;
}
@@ -840,3 +836,6 @@ static struct platform_driver mxs_dma_driver = {
};
builtin_platform_driver(mxs_dma_driver);
MODULE_DESCRIPTION("MXS DMA driver");
MODULE_LICENSE("GPL");

View File

@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqchip/irq-renesas-rzt2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -95,9 +96,16 @@ struct rz_dmac_icu {
u8 dmac_index;
};
struct rz_dmac_info {
void (*icu_register_dma_req)(struct platform_device *icu_dev,
u8 dmac_index, u8 dmac_channel, u16 req_no);
u16 default_dma_req_no;
};
struct rz_dmac {
struct dma_device engine;
struct rz_dmac_icu icu;
const struct rz_dmac_info *info;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@@ -106,8 +114,6 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
bool has_icu;
DECLARE_BITMAP(modules, 1024);
};
@@ -118,10 +124,12 @@ struct rz_dmac {
* Registers
*/
#define CRTB 0x0020
#define CHSTAT 0x0024
#define CHCTRL 0x0028
#define CHCFG 0x002c
#define NXLA 0x0038
#define CRLA 0x003c
#define DCTRL 0x0000
@@ -132,10 +140,12 @@ struct rz_dmac {
#define CHANNEL_8_15_COMMON_BASE 0x0700
#define CHSTAT_ER BIT(4)
#define CHSTAT_SUS BIT(3)
#define CHSTAT_EN BIT(0)
#define CHCTRL_CLRINTMSK BIT(17)
#define CHCTRL_CLRSUS BIT(9)
#define CHCTRL_SETSUS BIT(8)
#define CHCTRL_CLRTC BIT(6)
#define CHCTRL_CLREND BIT(5)
#define CHCTRL_CLRRQ BIT(4)
@@ -266,15 +276,12 @@ static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
struct dma_chan *chan = &channel->vc.chan;
struct rz_dmac *dmac = to_rz_dmac(chan->device);
unsigned long flags;
u32 nxla;
u32 chctrl;
u32 chstat;
dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
local_irq_save(flags);
rz_dmac_lmdesc_recycle(channel);
nxla = channel->lmdesc.base_dma +
@@ -289,8 +296,6 @@ static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
}
local_irq_restore(flags);
}
static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
@@ -316,6 +321,16 @@ static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}
static void rz_dmac_set_dma_req_no(struct rz_dmac *dmac, unsigned int index,
int req_no)
{
if (dmac->info->icu_register_dma_req)
dmac->info->icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
index, req_no);
else
rz_dmac_set_dmars_register(dmac, index, req_no);
}
static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
struct dma_chan *chan = &channel->vc.chan;
@@ -333,13 +348,7 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index,
RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, 0);
}
rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@@ -390,12 +399,7 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index, channel->mid_rid);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
}
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
channel->chctrl = CHCTRL_SETEN;
}
@@ -460,15 +464,12 @@ static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
struct rz_dmac *dmac = to_rz_dmac(chan->device);
struct rz_lmdesc *lmdesc = channel->lmdesc.base;
struct rz_dmac_desc *desc, *_desc;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&channel->vc.lock, flags);
for (i = 0; i < DMAC_NR_LMDESC; i++)
lmdesc[i].header = 0;
rz_lmdesc_setup(channel, channel->lmdesc.base);
rz_dmac_disable_hw(channel);
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
@@ -560,15 +561,12 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
struct rz_lmdesc *lmdesc = channel->lmdesc.base;
unsigned long flags;
unsigned int i;
LIST_HEAD(head);
spin_lock_irqsave(&channel->vc.lock, flags);
rz_dmac_disable_hw(channel);
for (i = 0; i < DMAC_NR_LMDESC; i++)
lmdesc[i].header = 0;
rz_lmdesc_setup(channel, channel->lmdesc.base);
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
@@ -679,13 +677,185 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index,
RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, 0);
rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);
}
static struct rz_lmdesc *
rz_dmac_get_next_lmdesc(struct rz_lmdesc *base, struct rz_lmdesc *lmdesc)
{
struct rz_lmdesc *next = ++lmdesc;
if (next >= base + DMAC_NR_LMDESC)
next = base;
return next;
}
static u32 rz_dmac_calculate_residue_bytes_in_vd(struct rz_dmac_chan *channel, u32 crla)
{
struct rz_lmdesc *lmdesc = channel->lmdesc.head;
struct dma_chan *chan = &channel->vc.chan;
struct rz_dmac *dmac = to_rz_dmac(chan->device);
u32 residue = 0, i = 0;
while (lmdesc->nxla != crla) {
lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
if (++i >= DMAC_NR_LMDESC)
return 0;
}
/* Calculate residue from next lmdesc to end of virtual desc */
while (lmdesc->chcfg & CHCFG_DEM) {
residue += lmdesc->tb;
lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
}
dev_dbg(dmac->dev, "%s: VD residue is %u\n", __func__, residue);
return residue;
}
static u32 rz_dmac_chan_get_residue(struct rz_dmac_chan *channel,
dma_cookie_t cookie)
{
struct rz_dmac_desc *current_desc, *desc;
enum dma_status status;
u32 crla, crtb, i;
/* Get current processing virtual descriptor */
current_desc = list_first_entry(&channel->ld_active,
struct rz_dmac_desc, node);
if (!current_desc)
return 0;
/*
* If the cookie corresponds to a descriptor that has been completed
* there is no residue. The same check has already been performed by the
* caller but without holding the channel lock, so the descriptor could
* now be complete.
*/
status = dma_cookie_status(&channel->vc.chan, cookie, NULL);
if (status == DMA_COMPLETE)
return 0;
/*
* If the cookie doesn't correspond to the currently processing virtual
* descriptor then the descriptor hasn't been processed yet, and the
* residue is equal to the full descriptor size. Also, a client driver
 * is possible to call this function before rz_dmac_irq_handler_thread()
* descriptor, and will appear in the done list. So, if the argument
* cookie matches the done list's cookie, we can assume the residue is
* zero.
*/
if (cookie != current_desc->vd.tx.cookie) {
list_for_each_entry(desc, &channel->ld_free, node) {
if (cookie == desc->vd.tx.cookie)
return 0;
}
list_for_each_entry(desc, &channel->ld_queue, node) {
if (cookie == desc->vd.tx.cookie)
return desc->len;
}
list_for_each_entry(desc, &channel->ld_active, node) {
if (cookie == desc->vd.tx.cookie)
return desc->len;
}
/*
* No descriptor found for the cookie, there's thus no residue.
* This shouldn't happen if the calling driver passes a correct
* cookie value.
*/
WARN(1, "No descriptor for cookie!");
return 0;
}
/*
* We need to read two registers. Make sure the hardware does not move
* to next lmdesc while reading the current lmdesc. Trying it 3 times
* should be enough: initial read, retry, retry for the paranoid.
*/
for (i = 0; i < 3; i++) {
crla = rz_dmac_ch_readl(channel, CRLA, 1);
crtb = rz_dmac_ch_readl(channel, CRTB, 1);
/* Still the same? */
if (crla == rz_dmac_ch_readl(channel, CRLA, 1))
break;
}
WARN_ONCE(i >= 3, "residue might not be continuous!");
/*
* Calculate number of bytes transferred in processing virtual descriptor.
* One virtual descriptor can have many lmdesc.
*/
return crtb + rz_dmac_calculate_residue_bytes_in_vd(channel, crla);
}
static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
enum dma_status status;
u32 residue;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
return status;
scoped_guard(spinlock_irqsave, &channel->vc.lock) {
u32 val;
residue = rz_dmac_chan_get_residue(channel, cookie);
val = rz_dmac_ch_readl(channel, CHSTAT, 1);
if (val & CHSTAT_SUS)
status = DMA_PAUSED;
}
/* if there's no residue and the channel is not paused, the cookie is complete */
if (!residue && status != DMA_PAUSED)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);
return status;
}
static int rz_dmac_device_pause(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
guard(spinlock_irqsave)(&channel->vc.lock);
val = rz_dmac_ch_readl(channel, CHSTAT, 1);
if (!(val & CHSTAT_EN))
return 0;
rz_dmac_ch_writel(channel, CHCTRL_SETSUS, CHCTRL, 1);
return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
(val & CHSTAT_SUS), 1, 1024,
false, channel, CHSTAT, 1);
}
static int rz_dmac_device_resume(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
guard(spinlock_irqsave)(&channel->vc.lock);
/* Do not check CHSTAT_SUS but rely on HW capabilities. */
rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
!(val & CHSTAT_SUS), 1, 1024,
false, channel, CHSTAT, 1);
}
/*
@@ -697,7 +867,7 @@ static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
struct dma_chan *chan = &channel->vc.chan;
struct rz_dmac *dmac = to_rz_dmac(chan->device);
u32 chstat, chctrl;
u32 chstat;
chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
if (chstat & CHSTAT_ER) {
@@ -706,13 +876,14 @@ static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
scoped_guard(spinlock_irqsave, &channel->vc.lock)
rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
goto done;
return;
}
chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
done:
return;
/*
* No need to lock. This just clears the END interrupt. Writing
* zeros to CHCTRL is just ignored by HW.
*/
rz_dmac_ch_writel(channel, CHCTRL_CLREND, CHCTRL, 1);
}
static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
@@ -876,14 +1047,13 @@ static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
uint32_t dmac_index;
int ret;
ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
if (ret == -ENOENT)
if (!dmac->info->icu_register_dma_req)
return 0;
ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
if (ret)
return ret;
dmac->has_icu = true;
dmac->icu.pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!dmac->icu.pdev) {
@@ -938,6 +1108,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (!dmac)
return -ENOMEM;
dmac->info = device_get_match_data(&pdev->dev);
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
@@ -955,23 +1126,22 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
if (!dmac->has_icu) {
if (!dmac->info->icu_register_dma_req) {
dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dmac->ext_base))
return PTR_ERR(dmac->ext_base);
}
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
irqname, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
irq, ret);
return ret;
irq = platform_get_irq_byname_optional(pdev, irqname);
if (irq > 0) {
ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
irqname, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
irq, ret);
return ret;
}
}
/* Initialize the channels. */
@@ -1009,6 +1179,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine = &dmac->engine;
dma_cap_set(DMA_SLAVE, engine->cap_mask);
dma_cap_set(DMA_MEMCPY, engine->cap_mask);
engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
@@ -1016,13 +1187,15 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
engine->device_free_chan_resources = rz_dmac_free_chan_resources;
engine->device_tx_status = dma_cookie_status;
engine->device_tx_status = rz_dmac_tx_status;
engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
engine->device_synchronize = rz_dmac_device_synchronize;
engine->device_pause = rz_dmac_device_pause;
engine->device_resume = rz_dmac_device_resume;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
@@ -1076,9 +1249,24 @@ static void rz_dmac_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
static const struct rz_dmac_info rz_dmac_v2h_info = {
.icu_register_dma_req = rzv2h_icu_register_dma_req,
.default_dma_req_no = RZV2H_ICU_DMAC_REQ_NO_DEFAULT,
};
static const struct rz_dmac_info rz_dmac_t2h_info = {
.icu_register_dma_req = rzt2h_icu_register_dma_req,
.default_dma_req_no = RZT2H_ICU_DMAC_REQ_NO_DEFAULT,
};
static const struct rz_dmac_info rz_dmac_generic_info = {
.default_dma_req_no = 0,
};
static const struct of_device_id of_rz_dmac_match[] = {
{ .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ .compatible = "renesas,r9a09g057-dmac", .data = &rz_dmac_v2h_info },
{ .compatible = "renesas,r9a09g077-dmac", .data = &rz_dmac_t2h_info },
{ .compatible = "renesas,rz-dmac", .data = &rz_dmac_generic_info },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

1437
drivers/dma/switchtec_dma.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -61,6 +61,8 @@ struct xdma_desc_block {
* @dir: Transferring direction of the channel
* @cfg: Transferring config of the channel
* @irq: IRQ assigned to the channel
 * @last_interrupt: task for completing last interrupt
* @stop_requested: stop request flag
*/
struct xdma_chan {
struct virt_dma_chan vchan;

View File

@@ -3194,7 +3194,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
= axivdma_clk_init;
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct device_node *np = pdev->dev.of_node;
u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
int i, err;
@@ -3334,12 +3334,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xdev);
/* Initialize the channels */
for_each_child_of_node(node, child) {
for_each_child_of_node_scoped(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0) {
of_node_put(child);
if (err < 0)
goto error;
}
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {

View File

@@ -73,6 +73,8 @@ enum dw_edma_chip_flags {
* @ll_region_rd: DMA descriptor link list memory for read channel
* @dt_region_wr: DMA data memory for write channel
* @dt_region_rd: DMA data memory for read channel
* @db_irq: Virtual IRQ dedicated to interrupt emulation
* @db_offset: Offset from DMA register base
* @mf: DMA register map format
* @dw: struct dw_edma that is filled by dw_edma_probe()
*/
@@ -94,9 +96,14 @@ struct dw_edma_chip {
struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
/* interrupt emulation */
int db_irq;
resource_size_t db_offset;
enum dw_edma_map_format mf;
struct dw_edma *dw;
bool cfg_non_ll;
};
/* Export to the platform drivers */

View File

@@ -8,6 +8,9 @@
/**
* enum spi_transfer_cmd - spi transfer commands
* @SPI_TX: SPI peripheral TX command
* @SPI_RX: SPI peripheral RX command
* @SPI_DUPLEX: SPI peripheral Duplex command
*/
enum spi_transfer_cmd {
SPI_TX = 1,
@@ -64,7 +67,7 @@ enum i2c_op {
* @set_config: set peripheral config
* @rx_len: receive length for buffer
* @op: i2c cmd
* @muli-msg: is part of multi i2c r-w msgs
* @multi_msg: is part of multi i2c r-w msgs
*/
struct gpi_i2c_config {
u8 set_config;

View File

@@ -16,8 +16,8 @@
* struct cppi5_desc_hdr_t - Descriptor header, present in all types of
* descriptors
* @pkt_info0: Packet info word 0 (n/a in Buffer desc)
* @pkt_info0: Packet info word 1 (n/a in Buffer desc)
* @pkt_info0: Packet info word 2 (n/a in Buffer desc)
* @pkt_info1: Packet info word 1 (n/a in Buffer desc)
* @pkt_info2: Packet info word 2 (n/a in Buffer desc)
* @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
*/
struct cppi5_desc_hdr_t {
@@ -35,7 +35,7 @@ struct cppi5_desc_hdr_t {
* @buf_info1: word 8: Buffer valid data length
* @org_buf_len: word 9: Original buffer length
* @org_buf_ptr: word 10/11: Original buffer pointer
* @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
* @epib: Extended Packet Info Data (optional, 4 words), and/or
* Protocol Specific Data (optional, 0-128 bytes in
* multiples of 4), and/or
* Other Software Data (0-N bytes, optional)
@@ -132,7 +132,7 @@ struct cppi5_desc_epib_t {
/**
* struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
* @hdr: Descriptor header
* @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
* @epib: Extended Packet Info Data (optional, 4 words), and/or
* Protocol Specific Data (optional, 0-128 bytes in
* multiples of 4), and/or
* Other Software Data (0-N bytes, optional)
@@ -179,7 +179,7 @@ static inline void cppi5_desc_dump(void *desc, u32 size)
* cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
* @paddr: Physical address of the packet popped from the ring
*
* Returns true if the address indicates TDCM
* Returns: true if the address indicates TDCM
*/
static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
{
@@ -190,7 +190,7 @@ static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
* cppi5_desc_get_type - get descriptor type
* @desc_hdr: packet descriptor/TR header
*
* Returns descriptor type:
* Returns: descriptor type:
* CPPI5_INFO0_DESC_TYPE_VAL_HOST
* CPPI5_INFO0_DESC_TYPE_VAL_MONO
* CPPI5_INFO0_DESC_TYPE_VAL_TR
@@ -205,7 +205,7 @@ static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
* cppi5_desc_get_errflags - get Error Flags from Desc
* @desc_hdr: packet/TR descriptor header
*
* Returns Error Flags from Packet/TR Descriptor
* Returns: Error Flags from Packet/TR Descriptor
*/
static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
{
@@ -307,7 +307,7 @@ static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
* @psdata_size: PSDATA size
* @sw_data_size: SWDATA size
*
* Returns required Host Packet Descriptor size
* Returns: required Host Packet Descriptor size
* 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
*/
static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
@@ -381,6 +381,8 @@ cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
/**
* cppi5_hdesc_get_psdata_size - get PSdata size in bytes
* @desc: Host packet descriptor
*
* Returns: PSdata size in bytes
*/
static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
{
@@ -398,7 +400,7 @@ static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_get_pktlen - get Packet Length from HDesc
* @desc: Host packet descriptor
*
* Returns Packet Length from Host Packet Descriptor
* Returns: Packet Length from Host Packet Descriptor
*/
static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
{
@@ -408,6 +410,7 @@ static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_set_pktlen - set Packet Length in HDesc
* @desc: Host packet descriptor
* @pkt_len: Packet length to set
*/
static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
u32 pkt_len)
@@ -420,7 +423,7 @@ static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
* cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
* @desc: Host packet descriptor
*
* Returns Protocol Specific Flags from Host Packet Descriptor
* Returns: Protocol Specific Flags from Host Packet Descriptor
*/
static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
{
@@ -431,6 +434,7 @@ static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
* @desc: Host packet descriptor
* @ps_flags: Protocol Specific flags to set
*/
static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
u32 ps_flags)
@@ -442,8 +446,10 @@ static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
}
/**
* cppi5_hdesc_get_errflags - get Packet Type from HDesc
* cppi5_hdesc_get_pkttype - get Packet Type from HDesc
* @desc: Host packet descriptor
*
* Returns: Packet type
*/
static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
{
@@ -452,7 +458,7 @@ static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
}
/**
* cppi5_hdesc_get_errflags - set Packet Type in HDesc
* cppi5_hdesc_set_pkttype - set Packet Type in HDesc
* @desc: Host packet descriptor
* @pkt_type: Packet Type
*/
@@ -501,7 +507,7 @@ static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
* @desc: Host Packet Descriptor
* @buf_desc: Host Buffer Descriptor physical address
* @hbuf_desc: Host Buffer Descriptor physical address
*
* add and link Host Buffer Descriptor to HDesc
*/
@@ -527,7 +533,7 @@ static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_epib_present - check if EPIB present
* @desc_hdr: packet descriptor/TR header
*
* Returns true if EPIB present in the packet
* Returns: true if EPIB present in the packet
*/
static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
{
@@ -538,7 +544,7 @@ static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
* cppi5_hdesc_get_psdata - Get pointer on PSDATA
* @desc: Host packet descriptor
*
* Returns pointer on PSDATA in HDesc.
* Returns: pointer on PSDATA in HDesc.
* NULL - if ps_data placed at the start of data buffer.
*/
static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
@@ -568,7 +574,7 @@ static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_get_swdata - Get pointer on swdata
* @desc: Host packet descriptor
*
* Returns pointer on SWDATA in HDesc.
* Returns: pointer on SWDATA in HDesc.
* NOTE. It's caller responsibility to be sure hdesc actually has swdata.
*/
static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
@@ -648,6 +654,7 @@ enum cppi5_tr_types {
CPPI5_TR_TYPE11,
/* type12-14: Reserved */
CPPI5_TR_TYPE15 = 15,
/* private: */
CPPI5_TR_TYPE_MAX
};
@@ -673,6 +680,7 @@ enum cppi5_tr_event_size {
CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
/* private: */
CPPI5_TR_EVENT_SIZE_MAX
};
@@ -690,6 +698,7 @@ enum cppi5_tr_trigger {
CPPI5_TR_TRIGGER_GLOBAL0,
CPPI5_TR_TRIGGER_GLOBAL1,
CPPI5_TR_TRIGGER_LOCAL_EVENT,
/* private: */
CPPI5_TR_TRIGGER_MAX
};
@@ -711,6 +720,7 @@ enum cppi5_tr_trigger_type {
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
CPPI5_TR_TRIGGER_TYPE_ALL,
/* private: */
CPPI5_TR_TRIGGER_TYPE_MAX
};
@@ -815,7 +825,7 @@ struct cppi5_tr_type3_t {
* destination
* @dicnt1: Total loop iteration count for level 1 for destination
* @dicnt2: Total loop iteration count for level 2 for destination
* @sicnt3: Total loop iteration count for level 3 (outermost) for
* @dicnt3: Total loop iteration count for level 3 (outermost) for
* destination
*/
struct cppi5_tr_type15_t {
@@ -887,6 +897,7 @@ enum cppi5_tr_resp_status_type {
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
/* private: */
CPPI5_TR_RESPONSE_STATUS_MAX
};
@@ -903,6 +914,7 @@ enum cppi5_tr_resp_status_submission {
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
/* private: */
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
};
@@ -931,6 +943,7 @@ enum cppi5_tr_resp_status_unsupported {
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
/* private: */
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
};
@@ -939,7 +952,7 @@ enum cppi5_tr_resp_status_unsupported {
* @tr_count: number of TR records
* @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
*
* Returns required TR Descriptor size
* Returns: required TR Descriptor size
*/
static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
{
@@ -955,7 +968,7 @@ static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
/**
* cppi5_trdesc_init - Init TR Descriptor
* @desc: TR Descriptor
* @desc_hdr: TR Descriptor
* @tr_count: number of TR records
* @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
* @reload_idx: Absolute index to jump to on the 2nd and following passes
@@ -1044,7 +1057,7 @@ static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
}
/**
* cppi5_tr_cflag_set - Update the Configuration specific flags
* cppi5_tr_csf_set - Update the Configuration specific flags
* @flags: Pointer to the TR's flags
* @csf: Configuration specific flags
*

View File

@@ -996,7 +996,8 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
* @vecs: The array of DMA vectors that should be transferred
* @nents: The number of DMA vectors in the array
* @dir: Specifies the direction of the data transfer
* @flags: DMA engine flags
* @flags: DMA engine flags - DMA_PREP_REPEAT can be used to mark a cyclic
* DMA transfer
*/
static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,

View File

@@ -38,6 +38,26 @@ extern int of_dma_controller_register(struct device_node *np,
void *data);
extern void of_dma_controller_free(struct device_node *np);
static void __of_dma_controller_free(void *np)
{
of_dma_controller_free(np);
}
static inline int
devm_of_dma_controller_register(struct device *dev, struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
void *data)
{
int ret;
ret = of_dma_controller_register(np, of_dma_xlate, data);
if (ret)
return ret;
return devm_add_action_or_reset(dev, __of_dma_controller_free, np);
}
extern int of_dma_router_register(struct device_node *np,
void *(*of_dma_route_allocate)
(struct of_phandle_args *, struct of_dma *),
@@ -64,6 +84,15 @@ static inline void of_dma_controller_free(struct device_node *np)
{
}
static inline int
devm_of_dma_controller_register(struct device *dev, struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
void *data)
{
return -ENODEV;
}
static inline int of_dma_router_register(struct device_node *np,
void *(*of_dma_route_allocate)
(struct of_phandle_args *, struct of_dma *),

View File

@@ -26,8 +26,9 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
/**
* struct mcf_edma_platform_data - platform specific data for eDMA engine
*
* @ver The eDMA module version.
* @dma_channels The number of eDMA channels.
* @dma_channels: The number of eDMA channels.
* @slave_map: Slave device map
* @slavecnt: Number of entries in @slave_map
*/
struct mcf_edma_platform_data {
int dma_channels;