diff --git a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt b/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt
deleted file mode 100644
index 3fe0961bcf64fdd360067f9c1d9ac42bb937897e..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-* Mediatek UART APDMA Controller
-
-Required properties:
-- compatible should contain:
-  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
-  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above
-
-- reg: The base address of the APDMA register bank.
-
-- interrupts: A single interrupt specifier.
-
-- clocks : Must contain an entry for each entry in clock-names.
-  See ../clocks/clock-bindings.txt for details.
-- clock-names: The APDMA clock for register accesses
-
-Examples:
-
-	apdma: dma-controller@11000380 {
-		compatible = "mediatek,mt2712-uart-dma";
-		reg = <0 0x11000380 0 0x400>;
-		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 65 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 66 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 67 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 68 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 69 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 70 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&pericfg CLK_PERI_AP_DMA>;
-		clock-names = "apdma";
-		#dma-cells = <1>;
-	};
-
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
index db7e2260f9c5749e510a19e6d275e7bbfdaff467..2c7fd1941abbbc8e9be9d65ece4f10a275b47c83 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -16,6 +16,9 @@ Optional properties:
   - dma-channels: contains the total number of DMA channels supported by the DMAC
   - dma-requests: contains the total number of DMA requests supported by the DMAC
   - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP
+  - resets: contains an entry for each entry in reset-names.
+	    See ../reset/reset.txt for details.
+  - reset-names: must contain at least "dma"; "dma-ocp" is optional.
 
 Example:
 
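On the driver side these names map directly onto the reset controller
API. A minimal sketch, assuming a probe path that already has the
struct device (the function name is illustrative, not the driver's):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int pl330_resets_sketch(struct device *dev)
	{
		struct reset_control *rst, *rst_ocp;

		/* "dma" is required by the binding */
		rst = devm_reset_control_get(dev, "dma");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* "dma-ocp" is optional; the getter returns NULL if absent */
		rst_ocp = devm_reset_control_get_optional(dev, "dma-ocp");
		if (IS_ERR(rst_ocp))
			return PTR_ERR(rst_ocp);

		reset_control_deassert(rst);
		reset_control_deassert(rst_ocp); /* no-op when NULL */
		return 0;
	}
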
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
index 97e213e07660816a2f81d19f400b104e09b1fe90..29dd3ccb1235c59a92eedc96a1bc79442b28f672 100644
--- a/Documentation/devicetree/bindings/dma/fsl-edma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -9,15 +9,16 @@ group, DMAMUX0 or DMAMUX1, but not both.
 Required properties:
 - compatible :
 	- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
+	- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
 - reg : Specifies base physical address(s) and size of the eDMA registers.
 	The 1st region is eDMA control register's address and size.
 	The 2nd and the 3rd regions are programmable channel multiplexing
 	control register's address and size.
 - interrupts : A list of interrupt-specifiers, one for each entry in
-	interrupt-names.
-- interrupt-names : Should contain:
-	"edma-tx" - the transmission interrupt
-	"edma-err" - the error interrupt
+	interrupt-names on SoCs similar to vf610. On i.mx7ulp there is one
+	transmission interrupt per channel (16 channel interrupts in total)
+	plus one error interrupt, listed last; no interrupt-names list is
+	used on i.mx7ulp, to keep the dts clean.
 - #dma-cells : Must be <2>.
 	The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
 	Specific request source can only be multiplexed by specific channels
@@ -28,6 +29,7 @@ Required properties:
 - clock-names : A list of channel group clock names. Should contain:
 	"dmamux0" - clock name of mux0 group
 	"dmamux1" - clock name of mux1 group
+	Note: i.mx7ulp has no dmamux0 clock but adds a separate 'dma' clock.
 - clocks : A list of phandle and clock-specifier pairs, one for each entry in
 	clock-names.
 
@@ -35,6 +37,10 @@ Optional properties:
 - big-endian: If present registers and hardware scatter/gather descriptors
 	of the eDMA are implemented in big endian mode, otherwise in little
 	mode.
+- interrupt-names : Should contain the entries below on SoCs similar to
+	vf610; not used on SoCs similar to i.mx7ulp:
+	"edma-tx" - the transmission interrupt
+	"edma-err" - the error interrupt
 
 
 Examples:
@@ -52,8 +58,36 @@ edma0: dma-controller@40018000 {
 	clock-names = "dmamux0", "dmamux1";
 	clocks = <&clks VF610_CLK_DMAMUX0>,
 		<&clks VF610_CLK_DMAMUX1>;
-};
+}; /* vf610 */
 
+edma1: dma-controller@40080000 {
+	#dma-cells = <2>;
+	compatible = "fsl,imx7ulp-edma";
+	reg = <0x40080000 0x2000>,
+		<0x40210000 0x1000>;
+	dma-channels = <32>;
+	interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+		     /* last is eDMA2-ERR interrupt */
+		     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+	clock-names = "dma", "dmamux0";
+	clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
+		 <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
+}; /* i.mx7ulp */
 
 * DMA clients
 DMA client drivers that uses the DMA function must use the format described
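Because the i.mx7ulp variant carries no interrupt-names, a driver has to
request its interrupts purely by index: one per channel, error last. A
hedged sketch of that pattern (the handler symbols are placeholders, not
the driver's actual functions):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t edma2_tx_handler(int irq, void *dev_id);  /* placeholder */
	static irqreturn_t edma2_err_handler(int irq, void *dev_id); /* placeholder */

	static int edma2_request_irqs(struct platform_device *pdev, int count)
	{
		int i, irq, ret;

		for (i = 0; i < count; i++) {
			irq = platform_get_irq(pdev, i);
			if (irq < 0)
				return irq;

			/* the last listed interrupt is the error interrupt */
			ret = devm_request_irq(&pdev->dev, irq,
					       i == count - 1 ? edma2_err_handler
							      : edma2_tx_handler,
					       0, dev_name(&pdev->dev), pdev);
			if (ret)
				return ret;
		}
		return 0;
	}
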
diff --git a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5d6f98c43e3df0c80c698b1daca6953bc1be31f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
@@ -0,0 +1,54 @@
+* Mediatek UART APDMA Controller
+
+Required properties:
+- compatible: Should contain:
+  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
+  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above
+
+- reg: The base address of each APDMA register bank (one bank per channel).
+
+- interrupts: One interrupt specifier per DMA channel: as many entries as
+  dma-requests, or 8 if the dma-requests property is not present.
+
+- dma-requests: The number of DMA channels.
+
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: The APDMA clock for register accesses
+
+- mediatek,dma-33bits: Present if the DMA requires 33-bit address support.
+
+Examples:
+
+	apdma: dma-controller@11000400 {
+		compatible = "mediatek,mt2712-uart-dma";
+		reg = <0 0x11000400 0 0x80>,
+		      <0 0x11000480 0 0x80>,
+		      <0 0x11000500 0 0x80>,
+		      <0 0x11000580 0 0x80>,
+		      <0 0x11000600 0 0x80>,
+		      <0 0x11000680 0 0x80>,
+		      <0 0x11000700 0 0x80>,
+		      <0 0x11000780 0 0x80>,
+		      <0 0x11000800 0 0x80>,
+		      <0 0x11000880 0 0x80>,
+		      <0 0x11000900 0 0x80>,
+		      <0 0x11000980 0 0x80>;
+		interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
+		dma-requests = <12>;
+		clocks = <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "apdma";
+		mediatek,dma-33bits;
+		#dma-cells = <1>;
+	};
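
The consumer side of this binding is the standard dmaengine request
path. A minimal sketch, assuming the UART node carries matching
dmas/dma-names entries called "tx" and "rx" (those names are an
assumption, not part of this binding):

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int uart_request_apdma(struct device *dev)
	{
		struct dma_chan *tx, *rx;

		tx = dma_request_chan(dev, "tx");
		if (IS_ERR(tx))
			return PTR_ERR(tx);

		rx = dma_request_chan(dev, "rx");
		if (IS_ERR(rx)) {
			dma_release_channel(tx);
			return PTR_ERR(rx);
		}
		return 0;
	}
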
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index 7fccc20d83311c22713370968638190720c90e1a..cae31f4e77ba964853f7a297421360d24a914c32 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -28,12 +28,17 @@ Example:
 	};
 
 ------------------------------------------------------------------------------
-For A64 DMA controller:
+For A64 and H6 DMA controllers:
 
 Required properties:
-- compatible:	"allwinner,sun50i-a64-dma"
+- compatible:	Must be one of
+		  "allwinner,sun50i-a64-dma"
+		  "allwinner,sun50i-h6-dma"
 - dma-channels: Number of DMA channels supported by the controller.
 		Refer to Documentation/devicetree/bindings/dma/dma.txt
+- clocks:	In addition to the parent AHB clock, it should also contain the
+		mbus clock (H6 only)
+- clock-names:	Should contain "bus" and "mbus" (H6 only)
 - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells
 
 Optional properties:
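
On the driver side the H6 variant only adds one extra clock lookup; a
sketch, assuming the probe function already has the struct device
(clock names follow the binding, the helper itself is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int sun6i_dma_clks_sketch(struct device *dev)
	{
		struct clk *bus, *mbus;

		bus = devm_clk_get(dev, "bus");
		if (IS_ERR(bus))
			return PTR_ERR(bus);

		mbus = devm_clk_get(dev, "mbus"); /* H6 only */
		if (IS_ERR(mbus))
			return PTR_ERR(mbus);

		return 0;
	}
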
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index e78d070bb468f9bd41e6330d28ccaaee267c6895..ee268d445d38b64b173098630a1db287f7bcd1b0 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -44,7 +44,8 @@ Example of usage::
 
     dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1
 
-Example of multi-channel test usage:
+Example of multi-channel test usage (new in the 5.0 kernel)::
+
     % modprobe dmatest
     % echo 2000 > /sys/module/dmatest/parameters/timeout
     % echo 1 > /sys/module/dmatest/parameters/iterations
@@ -53,15 +54,18 @@ Example of multi-channel test usage:
     % echo dma0chan2 > /sys/module/dmatest/parameters/channel
     % echo 1 > /sys/module/dmatest/parameters/run
 
-Note: the channel parameter should always be the last parameter set prior to
-running the test (setting run=1), this is because upon setting the channel
-parameter, that specific channel is requested using the dmaengine and a thread
-is created with the existing parameters. This thread is set as pending
-and will be executed once run is set to 1. Any parameters set after the thread
-is created are not applied.
+.. note::
+  For all tests, starting in the 5.0 kernel, either single- or multi-channel,
+  the channel parameter(s) must be set after all other parameters. It is at
+  that time that the existing parameter values are acquired for use by the
+  thread(s). All other parameters are shared. Therefore, if changes are made
+  to any of the other parameters and an additional channel is specified, all
+  threads will use the new (shared) values.
+  After the channels are specified, each thread is set as pending. All threads
+  begin execution when the run parameter is set to 1.
 
 .. hint::
-  available channel list could be extracted by running the following command::
+  A list of available channels can be found by running the following command::
 
     % ls -1 /sys/class/dma/
 
@@ -204,6 +208,7 @@ Releasing Channels
 Channels can be freed by setting run to 0.
 
 Example::
+
     % echo dma0chan1 > /sys/module/dmatest/parameters/channel
     dmatest: Added 1 threads using dma0chan1
     % cat /sys/class/dma/dma0chan1/in_use
diff --git a/MAINTAINERS b/MAINTAINERS
index 51ef58f15cdd23776de44fe40e3a987bb34618ad..d452d7bbbaad01b5ffa24fbf98f16e98c7967848 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4683,6 +4683,13 @@ L:	linux-mtd@lists.infradead.org
 S:	Supported
 F:	drivers/mtd/nand/raw/denali*
 
+DESIGNWARE EDMA CORE IP DRIVER
+M:	Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+L:	dmaengine@vger.kernel.org
+S:	Maintained
+F:	drivers/dma/dw-edma/
+F:	include/linux/dma/edma.h
+
 DESIGNWARE USB2 DRD IP DRIVER
 M:	Minas Harutyunyan <hminas@synopsys.com>
 L:	linux-usb@vger.kernel.org
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 703275cc29de1f820676c5cc7f193b5b511a1f9c..03fa0c58cef3b8ff93268c61afafab35a77a77c3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -103,6 +103,7 @@ config AXI_DMAC
 	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
+	select REGMAP_MMIO
 	help
 	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
 	  controller is often used in Analog Device's reference designs for FPGA
@@ -584,7 +585,7 @@ config TEGRA20_APB_DMA
 
 config TEGRA210_ADMA
 	tristate "NVIDIA Tegra210 ADMA support"
-	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
+	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -666,6 +667,8 @@ source "drivers/dma/qcom/Kconfig"
 
 source "drivers/dma/dw/Kconfig"
 
+source "drivers/dma/dw-edma/Kconfig"
+
 source "drivers/dma/hsu/Kconfig"
 
 source "drivers/dma/sh/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6126e1c3a875684eca1cd6f91e95f784b5db51e1..5bddf6f8790f9a2b2d70e3f97a15abb7d1dea4f3 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 464725dcad00112bba4e5ac40f0c15e165917858..9adc7a2fa3d3b597a864b63c3d14c38f3cb7c220 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2508,9 +2508,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
-			S_IFREG | S_IRUGO, NULL, pl08x,
-			&pl08x_debugfs_fops);
+	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
+			    NULL, pl08x, &pl08x_debugfs_fops);
 }
 
 #else
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 627ef3e5b3127f77f42b90e622d5782565e18b2c..b58ac720d9a122099dd6f9c7e60f28cc215dc0dc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1568,11 +1568,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 	struct at_xdmac_desc		*desc;
 	struct dma_async_tx_descriptor	*txd;
 
-	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
-	txd = &desc->tx_dma_desc;
+	if (!list_empty(&atchan->xfers_list)) {
+		desc = list_first_entry(&atchan->xfers_list,
+					struct at_xdmac_desc, xfer_node);
+		txd = &desc->tx_dma_desc;
 
-	if (txd->flags & DMA_PREP_INTERRUPT)
-		dmaengine_desc_get_callback_invoke(txd, NULL);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);
+	}
 }
 
 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
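
The hunk above guards against an empty xfers_list before taking its
first entry. The same shape can be written with
list_first_entry_or_null(), shown here only as an equivalent sketch
against the driver's own types:

	static void at_xdmac_handle_cyclic_alt(struct at_xdmac_chan *atchan)
	{
		struct at_xdmac_desc *desc;

		desc = list_first_entry_or_null(&atchan->xfers_list,
						struct at_xdmac_desc,
						xfer_node);
		if (desc && (desc->tx_dma_desc.flags & DMA_PREP_INTERRUPT))
			dmaengine_desc_get_callback_invoke(&desc->tx_dma_desc,
							   NULL);
	}
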
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index fa81d0177765bad98888ebc0eaa40e2aa1980a2a..275e90fa829de8896308cc4c2dc5b604a6edee92 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -164,7 +164,6 @@ struct sba_device {
 	struct list_head reqs_free_list;
 	/* DebugFS directory entries */
 	struct dentry *root;
-	struct dentry *stats;
 };
 
 /* ====== Command helper routines ===== */
@@ -1716,17 +1715,9 @@
 
 	/* Create debugfs root entry */
 	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
-	if (IS_ERR_OR_NULL(sba->root)) {
-		dev_err(sba->dev, "failed to create debugfs root entry\n");
-		sba->root = NULL;
-		goto skip_debugfs;
-	}
 
 	/* Create debugfs stats entry */
-	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
-						 sba_debugfs_stats_show);
-	if (IS_ERR_OR_NULL(sba->stats))
-		dev_err(sba->dev, "failed to create debugfs stats file\n");
-skip_debugfs:
+	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
+				    sba_debugfs_stats_show);
 
 	/* Register DMA device with Linux async framework */
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 547786ac342bd8ebd3c54e2c8032d56619027106..e51d836afcc7765d37ad5b084404aeebd13a8d69 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void)
 
 	dma_dentry = debugfs_create_dir("dma", NULL);
 
-	(void) debugfs_create_file("status",
-				   S_IFREG | S_IRUGO,
-				   dma_dentry, NULL,
-				   &coh901318_debugfs_status_operations);
+	debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
+			    &coh901318_debugfs_status_operations);
 	return 0;
 }
 
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 8a3f1043917b248a38536a02ef6e4125f85e064c..a0ee404b736ed966098c94e170fd62a71bfb6a5e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -2,7 +2,7 @@
 /*
  * Driver for the Analog Devices AXI-DMAC core
  *
- * Copyright 2013-2015 Analog Devices Inc.
+ * Copyright 2013-2019 Analog Devices Inc.
  *  Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
@@ -18,7 +18,9 @@
 #include <linux/of.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/fpga/adi-axi-common.h>
 
 #include <dt-bindings/dma/axi-dmac.h>
 
@@ -62,6 +64,8 @@
 #define AXI_DMAC_REG_STATUS		0x430
 #define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
 #define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
+#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
+#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450
 
 #define AXI_DMAC_CTRL_ENABLE		BIT(0)
 #define AXI_DMAC_CTRL_PAUSE		BIT(1)
@@ -70,6 +74,10 @@
 #define AXI_DMAC_IRQ_EOT		BIT(1)
 
 #define AXI_DMAC_FLAG_CYCLIC		BIT(0)
+#define AXI_DMAC_FLAG_LAST		BIT(1)
+#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)
+
+#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)
 
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
@@ -82,12 +90,14 @@ struct axi_dmac_sg {
 	unsigned int dest_stride;
 	unsigned int src_stride;
 	unsigned int id;
+	unsigned int partial_len;
 	bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
 	struct virt_dma_desc vdesc;
 	bool cyclic;
+	bool have_partial_xfer;
 
 	unsigned int num_submitted;
 	unsigned int num_completed;
@@ -108,8 +118,10 @@ struct axi_dmac_chan {
 	unsigned int dest_type;
 
 	unsigned int max_length;
-	unsigned int align_mask;
+	unsigned int address_align_mask;
+	unsigned int length_align_mask;
 
+	bool hw_partial_xfer;
 	bool hw_cyclic;
 	bool hw_2d;
 };
@@ -167,14 +179,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
 	if (len == 0)
 		return false;
-	if ((len & chan->align_mask) != 0) /* Not aligned */
+	if ((len & chan->length_align_mask) != 0) /* Not aligned */
 		return false;
 	return true;
 }
 
 static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
 {
-	if ((addr & chan->align_mask) != 0) /* Not aligned */
+	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
 		return false;
 	return true;
 }
@@ -210,11 +222,13 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	}
 
 	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs) {
+	if (desc->num_submitted == desc->num_sgs ||
+	    desc->have_partial_xfer) {
 		if (desc->cyclic)
 			desc->num_submitted = 0; /* Start again */
 		else
 			chan->next_desc = NULL;
+		flags |= AXI_DMAC_FLAG_LAST;
 	} else {
 		chan->next_desc = desc;
 	}
@@ -240,6 +254,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 		desc->num_sgs == 1)
 		flags |= AXI_DMAC_FLAG_CYCLIC;
 
+	if (chan->hw_partial_xfer)
+		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
+
 	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
 	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
 	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
@@ -252,6 +269,83 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
 		struct axi_dmac_desc, vdesc.node);
 }
 
+static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
+	struct axi_dmac_sg *sg)
+{
+	if (chan->hw_2d)
+		return sg->x_len * sg->y_len;
+	else
+		return sg->x_len;
+}
+
+static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *sg;
+	u32 xfer_done, len, id, i;
+	bool found_sg;
+
+	do {
+		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
+		id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);
+
+		found_sg = false;
+		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
+			for (i = 0; i < desc->num_sgs; i++) {
+				sg = &desc->sg[i];
+				if (sg->id == AXI_DMAC_SG_UNUSED)
+					continue;
+				if (sg->id == id) {
+					desc->have_partial_xfer = true;
+					sg->partial_len = len;
+					found_sg = true;
+					break;
+				}
+			}
+			if (found_sg)
+				break;
+		}
+
+		if (found_sg) {
+			dev_dbg(dmac->dma_dev.dev,
+				"Found partial segment id=%u, len=%u\n",
+				id, len);
+		} else {
+			dev_warn(dmac->dma_dev.dev,
+				 "Not found partial segment id=%u, len=%u\n",
+				 id, len);
+		}
+
+		/* Check if we have any more partial transfers */
+		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);
+
+	} while (!xfer_done);
+}
+
+static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
+	struct axi_dmac_desc *active)
+{
+	struct dmaengine_result *rslt = &active->vdesc.tx_result;
+	unsigned int start = active->num_completed - 1;
+	struct axi_dmac_sg *sg;
+	unsigned int i, total;
+
+	rslt->result = DMA_TRANS_NOERROR;
+	rslt->residue = 0;
+
+	/*
+	 * We get here if the last completed segment is partial, which
+	 * means we can compute the residue from that segment onwards
+	 */
+	for (i = start; i < active->num_sgs; i++) {
+		sg = &active->sg[i];
+		total = axi_dmac_total_sg_bytes(chan, sg);
+		rslt->residue += (total - sg->partial_len);
+	}
+}
+
 static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	unsigned int completed_transfers)
 {
@@ -263,6 +357,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	if (!active)
 		return false;
 
+	if (chan->hw_partial_xfer &&
+	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
+		axi_dmac_dequeue_partial_xfers(chan);
+
 	do {
 		sg = &active->sg[active->num_completed];
 		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
@@ -276,10 +374,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 			start_next = true;
 		}
 
+		if (sg->partial_len)
+			axi_dmac_compute_residue(chan, active);
+
 		if (active->cyclic)
 			vchan_cyclic_callback(&active->vdesc);
 
-		if (active->num_completed == active->num_sgs) {
+		if (active->num_completed == active->num_sgs ||
+		    sg->partial_len) {
 			if (active->cyclic) {
 				active->num_completed = 0; /* wrap around */
 			} else {
@@ -391,7 +493,7 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
 	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 	segment_size = DIV_ROUND_UP(period_len, num_segments);
 	/* Take care of alignment */
-	segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
 
 	for (i = 0; i < num_periods; i++) {
 		len = period_len;
@@ -561,6 +663,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
 		desc->sg[0].y_len = 1;
 	}
 
+	if (flags & DMA_CYCLIC)
+		desc->cyclic = true;
+
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 }
 
@@ -574,6 +679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
 }
 
+static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AXI_DMAC_REG_IRQ_MASK:
+	case AXI_DMAC_REG_IRQ_SOURCE:
+	case AXI_DMAC_REG_IRQ_PENDING:
+	case AXI_DMAC_REG_CTRL:
+	case AXI_DMAC_REG_TRANSFER_ID:
+	case AXI_DMAC_REG_START_TRANSFER:
+	case AXI_DMAC_REG_FLAGS:
+	case AXI_DMAC_REG_DEST_ADDRESS:
+	case AXI_DMAC_REG_SRC_ADDRESS:
+	case AXI_DMAC_REG_X_LENGTH:
+	case AXI_DMAC_REG_Y_LENGTH:
+	case AXI_DMAC_REG_DEST_STRIDE:
+	case AXI_DMAC_REG_SRC_STRIDE:
+	case AXI_DMAC_REG_TRANSFER_DONE:
+	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
+	case AXI_DMAC_REG_STATUS:
+	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
+	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
+	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
+	case AXI_DMAC_REG_PARTIAL_XFER_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config axi_dmac_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
+	.readable_reg = axi_dmac_regmap_rdwr,
+	.writeable_reg = axi_dmac_regmap_rdwr,
+};
+
 /*
  * The configuration stored in the devicetree matches the configuration
  * parameters of the peripheral instance and allows the driver to know which
@@ -617,7 +760,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 		return ret;
 	chan->dest_width = val / 8;
 
-	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
 
 	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
 		chan->direction = DMA_MEM_TO_MEM;
@@ -631,9 +774,12 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 	return 0;
 }
 
-static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+static int axi_dmac_detect_caps(struct axi_dmac *dmac)
 {
 	struct axi_dmac_chan *chan = &dmac->chan;
+	unsigned int version;
+
+	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
 
 	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
 	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
@@ -647,6 +793,35 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac)
 	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
 	if (chan->max_length != UINT_MAX)
 		chan->max_length++;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
+	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+		dev_err(dmac->dma_dev.dev,
+			"Destination memory-mapped interface not supported.");
+		return -ENODEV;
+	}
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
+	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
+		dev_err(dmac->dma_dev.dev,
+			"Source memory-mapped interface not supported.");
+		return -ENODEV;
+	}
+
+	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
+		chan->hw_partial_xfer = true;
+
+	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
+		chan->length_align_mask =
+			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+	} else {
+		chan->length_align_mask = chan->address_align_mask;
+	}
+
+	return 0;
 }
 
 static int axi_dmac_probe(struct platform_device *pdev)
@@ -722,7 +897,11 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
-	axi_dmac_detect_caps(dmac);
+	ret = axi_dmac_detect_caps(dmac);
+	if (ret)
+		goto err_clk_disable;
+
+	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
 
 	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
 
@@ -742,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dmac);
 
+	devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+
 	return 0;
 
 err_unregister_of:
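
From a client's point of view the new partial-transfer support surfaces
through standard residue reporting; a hedged sketch of the consumer side
(chan and cookie come from the usual submit path):

	#include <linux/dmaengine.h>

	static u32 axi_dmac_client_residue(struct dma_chan *chan,
					   dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		dmaengine_tx_status(chan, cookie, &state);
		return state.residue; /* bytes the DMAC did not transfer */
	}
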
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 6b8c4c458e8a39a0b9500804a3c0a4cb8f6e5839..7fe9309a876bde8fcb6e035dd0fd27baf464f333 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -156,7 +156,6 @@ struct jz4780_dma_dev {
 };
 
 struct jz4780_dma_filter_data {
-	struct device_node *of_node;
 	uint32_t transfer_type;
 	int channel;
 };
@@ -772,8 +771,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	struct jz4780_dma_filter_data *data = param;
 
-	if (jzdma->dma_device.dev->of_node != data->of_node)
-		return false;
 
 	if (data->channel > -1) {
 		if (data->channel != jzchan->id)
@@ -797,7 +794,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args_count != 2)
 		return NULL;
 
-	data.of_node = ofdma->of_node;
 	data.transfer_type = dma_spec->args[0];
 	data.channel = dma_spec->args[1];
 
@@ -822,7 +818,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 		return dma_get_slave_channel(
 			&jzdma->chan[data.channel].vchan.chan);
 	} else {
-		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
+					     ofdma->of_node);
 	}
 }
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 58cbf9fd5a46c33c4ab946da34b0b382cb735cf9..03ac4b96117cd8dbf423998c6fa3059cdb532dfa 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -61,7 +61,7 @@ static long dmaengine_ref_count;
 /* --- sysfs implementation --- */
 
 /**
- * dev_to_dma_chan - convert a device pointer to the its sysfs container object
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
  * @dev - device node
  *
  * Must be called under dma_list_mutex
@@ -629,11 +629,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
  * @fn_param: opaque parameter to pass to dma_filter_fn
+ * @np: device node to look for DMA channels
  *
  * Returns pointer to appropriate DMA channel on success or NULL.
  */
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param)
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np)
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
@@ -641,6 +643,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		/* Find a DMA controller with a matching device node */
+		if (np && device->dev->of_node && np != device->dev->of_node)
+			continue;
+
 		chan = find_candidate(device, mask, fn, fn_param);
 		if (!IS_ERR(chan))
 			break;
@@ -699,7 +705,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
 	if (chan) {
-		/* Valid channel found or requester need to be deferred */
+		/* Valid channel found or requester needs to be deferred */
 		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 			return chan;
 	}
@@ -757,7 +763,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 	if (!mask)
 		return ERR_PTR(-ENODEV);
 
-	chan = __dma_request_channel(mask, NULL, NULL);
+	chan = __dma_request_channel(mask, NULL, NULL, NULL);
 	if (!chan) {
 		mutex_lock(&dma_list_mutex);
 		if (list_empty(&dma_device_list))
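
The new fourth argument lets an of_dma translation helper restrict
matching to its own controller node, which is exactly what makes the
driver-private of_node comparison in dma-jz4780.c above removable. A
minimal sketch of such a helper (my_filter is a placeholder):

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>

	static bool my_filter(struct dma_chan *chan, void *param); /* placeholder */

	static struct dma_chan *my_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* only controllers whose device node matches ofdma->of_node
		 * are considered by the core
		 */
		return __dma_request_channel(&mask, my_filter, dma_spec->args,
					     ofdma->of_node);
	}
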
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d0ad46e916a6613ef472a5c79da669be2da6184f..3d22ae8dca7203ab797646ed6e972aedeb84a32b 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources,
 static int timeout = 3000;
 module_param(timeout, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
-		 "Pass -1 for infinite timeout");
+		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");
 
 static bool noverify;
 module_param(noverify, bool, S_IRUGO | S_IWUSR);
@@ -94,7 +94,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
  * @iterations:		iterations before stopping test
  * @xor_sources:	number of xor source buffers
  * @pq_sources:		number of p+q source buffers
- * @timeout:		transfer timeout in msec, -1 for infinite timeout
+ * @timeout:		transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
  */
 struct dmatest_params {
 	unsigned int	buf_size;
@@ -105,7 +105,7 @@ struct dmatest_params {
 	unsigned int	iterations;
 	unsigned int	xor_sources;
 	unsigned int	pq_sources;
-	int		timeout;
+	unsigned int	timeout;
 	bool		noverify;
 	bool		norandom;
 	int		alignment;
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..7ff17b2db6a161da2f471eb463d87288f75fe5af
--- /dev/null
+++ b/drivers/dma/dw-edma/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DW_EDMA
+	tristate "Synopsys DesignWare eDMA controller driver"
+	depends on PCI && PCI_MSI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Synopsys DesignWare eDMA controller, normally
+	  implemented in endpoint SoCs.
+
+config DW_EDMA_PCIE
+	tristate "Synopsys DesignWare eDMA PCIe driver"
+	depends on PCI && PCI_MSI
+	select DW_EDMA
+	help
+	  Provides the glue logic between the Synopsys DesignWare
+	  eDMA controller and a PCIe endpoint device. This also serves
+	  as a reference design for anyone who wants to use this IP.
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..8d45c0d5689d5738a81455fa50fd9647ccee8552
--- /dev/null
+++ b/drivers/dma/dw-edma/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DW_EDMA)		+= dw-edma.o
+dw-edma-$(CONFIG_DEBUG_FS)	:= dw-edma-v0-debugfs.o
+dw-edma-objs			:= dw-edma-core.o \
+					dw-edma-v0-core.o $(dw-edma-y)
+obj-$(CONFIG_DW_EDMA_PCIE)	+= dw-edma-pcie.o
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
new file mode 100644
index 0000000000000000000000000000000000000000..ff392c01bad1f67d846b8f9505a1ffcb831e9982
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -0,0 +1,937 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/dma/edma.h>
+#include <linux/pci.h>
+
+#include "dw-edma-core.h"
+#include "dw-edma-v0-core.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline
+struct device *dchan2dev(struct dma_chan *dchan)
+{
+	return &dchan->dev->device;
+}
+
+static inline
+struct device *chan2dev(struct dw_edma_chan *chan)
+{
+	return &chan->vc.chan.dev->device;
+}
+
+static inline
+struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct dw_edma_desc, vd);
+}
+
+static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_burst *burst;
+
+	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
+	if (unlikely(!burst))
+		return NULL;
+
+	INIT_LIST_HEAD(&burst->list);
+	if (chunk->burst) {
+		/* Create and add new element into the linked list */
+		chunk->bursts_alloc++;
+		list_add_tail(&burst->list, &chunk->burst->list);
+	} else {
+		/* List head */
+		chunk->bursts_alloc = 0;
+		chunk->burst = burst;
+	}
+
+	return burst;
+}
+
+static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
+{
+	struct dw_edma_chan *chan = desc->chan;
+	struct dw_edma *dw = chan->chip->dw;
+	struct dw_edma_chunk *chunk;
+
+	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+	if (unlikely(!chunk))
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->list);
+	chunk->chan = chan;
+	/* Toggle the change bit (CB) in each chunk: this is the mechanism
+	 * that informs the eDMA HW block that a new linked list is ready
+	 * to be consumed.
+	 *  - Odd chunks originate CB equal to 0
+	 *  - Even chunks originate CB equal to 1
+	 */
+	chunk->cb = !(desc->chunks_alloc % 2);
+	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
+	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+
+	if (desc->chunk) {
+		/* Create and add new element into the linked list */
+		desc->chunks_alloc++;
+		list_add_tail(&chunk->list, &desc->chunk->list);
+		if (!dw_edma_alloc_burst(chunk)) {
+			kfree(chunk);
+			return NULL;
+		}
+	} else {
+		/* List head */
+		chunk->burst = NULL;
+		desc->chunks_alloc = 0;
+		desc->chunk = chunk;
+	}
+
+	return chunk;
+}
+
+static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
+{
+	struct dw_edma_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (unlikely(!desc))
+		return NULL;
+
+	desc->chan = chan;
+	if (!dw_edma_alloc_chunk(desc)) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return desc;
+}
+
+static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_burst *child, *_next;
+
+	/* Remove all the list elements */
+	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
+		list_del(&child->list);
+		kfree(child);
+		chunk->bursts_alloc--;
+	}
+
+	/* Remove the list head */
+	kfree(child);
+	chunk->burst = NULL;
+}
+
+static void dw_edma_free_chunk(struct dw_edma_desc *desc)
+{
+	struct dw_edma_chunk *child, *_next;
+
+	if (!desc->chunk)
+		return;
+
+	/* Remove all the list elements */
+	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
+		dw_edma_free_burst(child);
+		list_del(&child->list);
+		kfree(child);
+		desc->chunks_alloc--;
+	}
+
+	/* Remove the list head */
+	kfree(child);
+	desc->chunk = NULL;
+}
+
+static void dw_edma_free_desc(struct dw_edma_desc *desc)
+{
+	dw_edma_free_chunk(desc);
+	kfree(desc);
+}
+
+static void vchan_free_desc(struct virt_dma_desc *vdesc)
+{
+	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
+}
+
+static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+{
+	struct dw_edma_chunk *child;
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&chan->vc);
+	if (!vd)
+		return;
+
+	desc = vd2dw_edma_desc(vd);
+	if (!desc)
+		return;
+
+	child = list_first_entry_or_null(&desc->chunk->list,
+					 struct dw_edma_chunk, list);
+	if (!child)
+		return;
+
+	dw_edma_v0_core_start(child, !desc->xfer_sz);
+	desc->xfer_sz += child->ll_region.sz;
+	dw_edma_free_burst(child);
+	list_del(&child->list);
+	kfree(child);
+	desc->chunks_alloc--;
+}
+
+static int dw_edma_device_config(struct dma_chan *dchan,
+				 struct dma_slave_config *config)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	memcpy(&chan->config, config, sizeof(*config));
+	chan->configured = true;
+
+	return 0;
+}
+
+static int dw_edma_device_pause(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+
+	if (!chan->configured)
+		err = -EPERM;
+	else if (chan->status != EDMA_ST_BUSY)
+		err = -EPERM;
+	else if (chan->request != EDMA_REQ_NONE)
+		err = -EPERM;
+	else
+		chan->request = EDMA_REQ_PAUSE;
+
+	return err;
+}
+
+static int dw_edma_device_resume(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+
+	if (!chan->configured) {
+		err = -EPERM;
+	} else if (chan->status != EDMA_ST_PAUSE) {
+		err = -EPERM;
+	} else if (chan->request != EDMA_REQ_NONE) {
+		err = -EPERM;
+	} else {
+		chan->status = EDMA_ST_BUSY;
+		dw_edma_start_transfer(chan);
+	}
+
+	return err;
+}
+
+static int dw_edma_device_terminate_all(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+	LIST_HEAD(head);
+
+	if (!chan->configured) {
+		/* Do nothing */
+	} else if (chan->status == EDMA_ST_PAUSE) {
+		chan->status = EDMA_ST_IDLE;
+		chan->configured = false;
+	} else if (chan->status == EDMA_ST_IDLE) {
+		chan->configured = false;
+	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
+		/*
+		 * The channel is in a false BUSY state, probably didn't
+		 * receive or lost an interrupt
+		 */
+		chan->status = EDMA_ST_IDLE;
+		chan->configured = false;
+	} else if (chan->request > EDMA_REQ_PAUSE) {
+		err = -EPERM;
+	} else {
+		chan->request = EDMA_REQ_STOP;
+	}
+
+	return err;
+}
+
+static void dw_edma_device_issue_pending(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	if (chan->configured && chan->request == EDMA_REQ_NONE &&
+	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+		chan->status = EDMA_ST_BUSY;
+		dw_edma_start_transfer(chan);
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static enum dma_status
+dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+			 struct dma_tx_state *txstate)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(dchan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
+		ret = DMA_PAUSED;
+
+	if (!txstate)
+		goto ret_residue;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_find_desc(&chan->vc, cookie);
+	if (vd) {
+		desc = vd2dw_edma_desc(vd);
+		if (desc)
+			residue = desc->alloc_sz - desc->xfer_sz;
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ret_residue:
+	dma_set_residue(txstate, residue);
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
+	enum dma_transfer_direction direction = xfer->direction;
+	phys_addr_t src_addr, dst_addr;
+	struct scatterlist *sg = NULL;
+	struct dw_edma_chunk *chunk;
+	struct dw_edma_burst *burst;
+	struct dw_edma_desc *desc;
+	u32 cnt;
+	int i;
+
+	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
+	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+		return NULL;
+
+	if (xfer->cyclic) {
+		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
+			return NULL;
+	} else {
+		if (xfer->xfer.sg.len < 1)
+			return NULL;
+	}
+
+	if (!chan->configured)
+		return NULL;
+
+	desc = dw_edma_alloc_desc(chan);
+	if (unlikely(!desc))
+		goto err_alloc;
+
+	chunk = dw_edma_alloc_chunk(desc);
+	if (unlikely(!chunk))
+		goto err_alloc;
+
+	src_addr = chan->config.src_addr;
+	dst_addr = chan->config.dst_addr;
+
+	if (xfer->cyclic) {
+		cnt = xfer->xfer.cyclic.cnt;
+	} else {
+		cnt = xfer->xfer.sg.len;
+		sg = xfer->xfer.sg.sgl;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		if (!xfer->cyclic && !sg)
+			break;
+
+		if (chunk->bursts_alloc == chan->ll_max) {
+			chunk = dw_edma_alloc_chunk(desc);
+			if (unlikely(!chunk))
+				goto err_alloc;
+		}
+
+		burst = dw_edma_alloc_burst(chunk);
+		if (unlikely(!burst))
+			goto err_alloc;
+
+		if (xfer->cyclic)
+			burst->sz = xfer->xfer.cyclic.len;
+		else
+			burst->sz = sg_dma_len(sg);
+
+		chunk->ll_region.sz += burst->sz;
+		desc->alloc_sz += burst->sz;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			burst->sar = src_addr;
+			if (xfer->cyclic) {
+				burst->dar = xfer->xfer.cyclic.paddr;
+			} else {
+				burst->dar = sg_dma_address(sg);
+				/* Unlike the typical assumption by other
+				 * drivers/IPs, the peripheral memory isn't
+				 * a FIFO memory; in this case it's a
+				 * linear memory, and that is why the source
+				 * and destination addresses are increased
+				 * by the same portion (data length)
+				 */
+				src_addr += sg_dma_len(sg);
+			}
+		} else {
+			burst->dar = dst_addr;
+			if (xfer->cyclic) {
+				burst->sar = xfer->xfer.cyclic.paddr;
+			} else {
+				burst->sar = sg_dma_address(sg);
+				/* Unlike the typical assumption by other
+				 * drivers/IPs, the peripheral memory isn't
+				 * a FIFO memory; in this case it's a
+				 * linear memory, and that is why the source
+				 * and destination addresses are increased
+				 * by the same portion (data length)
+				 */
+				dst_addr += sg_dma_len(sg);
+			}
+		}
+
+		if (!xfer->cyclic)
+			sg = sg_next(sg);
+	}
+
+	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
+
+err_alloc:
+	if (desc)
+		dw_edma_free_desc(desc);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			     unsigned int len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags, void *context)
+{
+	struct dw_edma_transfer xfer;
+
+	xfer.dchan = dchan;
+	xfer.direction = direction;
+	xfer.xfer.sg.sgl = sgl;
+	xfer.xfer.sg.len = len;
+	xfer.flags = flags;
+	xfer.cyclic = false;
+
+	return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
+			       size_t len, size_t count,
+			       enum dma_transfer_direction direction,
+			       unsigned long flags)
+{
+	struct dw_edma_transfer xfer;
+
+	xfer.dchan = dchan;
+	xfer.direction = direction;
+	xfer.xfer.cyclic.paddr = paddr;
+	xfer.xfer.cyclic.len = len;
+	xfer.xfer.cyclic.cnt = count;
+	xfer.flags = flags;
+	xfer.cyclic = true;
+
+	return dw_edma_device_transfer(&xfer);
+}
+
+static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
+{
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	dw_edma_v0_core_clear_done_int(chan);
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_next_desc(&chan->vc);
+	if (vd) {
+		switch (chan->request) {
+		case EDMA_REQ_NONE:
+			desc = vd2dw_edma_desc(vd);
+			if (desc->chunks_alloc) {
+				chan->status = EDMA_ST_BUSY;
+				dw_edma_start_transfer(chan);
+			} else {
+				list_del(&vd->node);
+				vchan_cookie_complete(vd);
+				chan->status = EDMA_ST_IDLE;
+			}
+			break;
+
+		case EDMA_REQ_STOP:
+			list_del(&vd->node);
+			vchan_cookie_complete(vd);
+			chan->request = EDMA_REQ_NONE;
+			chan->status = EDMA_ST_IDLE;
+			break;
+
+		case EDMA_REQ_PAUSE:
+			chan->request = EDMA_REQ_NONE;
+			chan->status = EDMA_ST_PAUSE;
+			break;
+
+		default:
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
+{
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	dw_edma_v0_core_clear_abort_int(chan);
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_next_desc(&chan->vc);
+	if (vd) {
+		list_del(&vd->node);
+		vchan_cookie_complete(vd);
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+	chan->request = EDMA_REQ_NONE;
+	chan->status = EDMA_ST_IDLE;
+}
+
+static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
+{
+	struct dw_edma_irq *dw_irq = data;
+	struct dw_edma *dw = dw_irq->dw;
+	unsigned long total, pos, val;
+	unsigned long off;
+	u32 mask;
+
+	if (write) {
+		total = dw->wr_ch_cnt;
+		off = 0;
+		mask = dw_irq->wr_mask;
+	} else {
+		total = dw->rd_ch_cnt;
+		off = dw->wr_ch_cnt;
+		mask = dw_irq->rd_mask;
+	}
+
+	val = dw_edma_v0_core_status_done_int(dw, write ?
+							  EDMA_DIR_WRITE :
+							  EDMA_DIR_READ);
+	val &= mask;
+	for_each_set_bit(pos, &val, total) {
+		struct dw_edma_chan *chan = &dw->chan[pos + off];
+
+		dw_edma_done_interrupt(chan);
+	}
+
+	val = dw_edma_v0_core_status_abort_int(dw, write ?
+							   EDMA_DIR_WRITE :
+							   EDMA_DIR_READ);
+	val &= mask;
+	for_each_set_bit(pos, &val, total) {
+		struct dw_edma_chan *chan = &dw->chan[pos + off];
+
+		dw_edma_abort_interrupt(chan);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, true);
+}
+
+static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, false);
+}
+
+static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
+{
+	dw_edma_interrupt(irq, data, true);
+	dw_edma_interrupt(irq, data, false);
+
+	return IRQ_HANDLED;
+}
+
+static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	if (chan->status != EDMA_ST_IDLE)
+		return -EBUSY;
+
+	pm_runtime_get(chan->chip->dev);
+
+	return 0;
+}
+
+static void dw_edma_free_chan_resources(struct dma_chan *dchan)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int ret;
+
+	while (time_before(jiffies, timeout)) {
+		ret = dw_edma_device_terminate_all(dchan);
+		if (!ret)
+			break;
+
+		if (time_after_eq(jiffies, timeout))
+			return;
+
+		cpu_relax();
+	}
+
+	pm_runtime_put(chan->chip->dev);
+}
+
+static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+				 u32 wr_alloc, u32 rd_alloc)
+{
+	struct dw_edma_region *dt_region;
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	struct dw_edma_chan *chan;
+	size_t ll_chunk, dt_chunk;
+	struct dw_edma_irq *irq;
+	struct dma_device *dma;
+	u32 i, j, cnt, ch_cnt;
+	u32 alloc, off_alloc;
+	int err = 0;
+	u32 pos;
+
+	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+	ll_chunk = dw->ll_region.sz;
+	dt_chunk = dw->dt_region.sz;
+
+	/* Calculate linked list chunk for each channel */
+	ll_chunk /= roundup_pow_of_two(ch_cnt);
+
+	/* Calculate data chunk for each channel */
+	dt_chunk /= roundup_pow_of_two(ch_cnt);
+
+	if (write) {
+		i = 0;
+		cnt = dw->wr_ch_cnt;
+		dma = &dw->wr_edma;
+		alloc = wr_alloc;
+		off_alloc = 0;
+	} else {
+		i = dw->wr_ch_cnt;
+		cnt = dw->rd_ch_cnt;
+		dma = &dw->rd_edma;
+		alloc = rd_alloc;
+		off_alloc = wr_alloc;
+	}
+
+	INIT_LIST_HEAD(&dma->channels);
+	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+		chan = &dw->chan[i];
+
+		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
+		if (!dt_region)
+			return -ENOMEM;
+
+		chan->vc.chan.private = dt_region;
+
+		chan->chip = chip;
+		chan->id = j;
+		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
+		chan->configured = false;
+		chan->request = EDMA_REQ_NONE;
+		chan->status = EDMA_ST_IDLE;
+
+		chan->ll_off = (ll_chunk * i);
+		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
+
+		chan->dt_off = (dt_chunk * i);
+
+		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
+			 write ? "write" : "read", j,
+			 chan->ll_off, chan->ll_max);
+
+		if (dw->nr_irqs == 1)
+			pos = 0;
+		else
+			pos = off_alloc + (j % alloc);
+
+		irq = &dw->irq[pos];
+
+		if (write)
+			irq->wr_mask |= BIT(j);
+		else
+			irq->rd_mask |= BIT(j);
+
+		irq->dw = dw;
+		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
+
+		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
+			 write ? "write" : "read", j,
+			 chan->msi.address_hi, chan->msi.address_lo,
+			 chan->msi.data);
+
+		chan->vc.desc_free = vchan_free_desc;
+		vchan_init(&chan->vc, dma);
+
+		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
+		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
+		dt_region->sz = dt_chunk;
+
+		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
+			 write ? "write" : "read", j, chan->dt_off);
+
+		dw_edma_v0_core_device_config(chan);
+	}
+
+	/* Set DMA channel capabilities */
+	dma_cap_zero(dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma->chancnt = cnt;
+
+	/* Set DMA channel callbacks */
+	dma->dev = chip->dev;
+	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
+	dma->device_free_chan_resources = dw_edma_free_chan_resources;
+	dma->device_config = dw_edma_device_config;
+	dma->device_pause = dw_edma_device_pause;
+	dma->device_resume = dw_edma_device_resume;
+	dma->device_terminate_all = dw_edma_device_terminate_all;
+	dma->device_issue_pending = dw_edma_device_issue_pending;
+	dma->device_tx_status = dw_edma_device_tx_status;
+	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
+	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+
+	dma_set_max_seg_size(dma->dev, U32_MAX);
+
+	/* Register DMA device */
+	err = dma_async_device_register(dma);
+
+	return err;
+}
+
+static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
+{
+	if (*nr_irqs && *alloc < cnt) {
+		(*alloc)++;
+		(*nr_irqs)--;
+	}
+}
+
+static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
+{
+	while (*mask * alloc < cnt)
+		(*mask)++;
+}
+
+static int dw_edma_irq_request(struct dw_edma_chip *chip,
+			       u32 *wr_alloc, u32 *rd_alloc)
+{
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	u32 wr_mask = 1;
+	u32 rd_mask = 1;
+	int i, err = 0;
+	u32 ch_cnt;
+
+	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+
+	if (dw->nr_irqs < 1)
+		return -EINVAL;
+
+	if (dw->nr_irqs == 1) {
+		/* Common IRQ shared among all channels */
+		err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
+				  dw_edma_interrupt_common,
+				  IRQF_SHARED, dw->name, &dw->irq[0]);
+		if (err) {
+			dw->nr_irqs = 0;
+			return err;
+		}
+
+		get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
+				   &dw->irq[0].msi);
+	} else {
+		/* Distribute IRQs equally among all channels */
+		int tmp = dw->nr_irqs;
+
+		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
+			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
+			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
+		}
+
+		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
+		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
+
+		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
+			err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+					  i < *wr_alloc ?
+						dw_edma_interrupt_write :
+						dw_edma_interrupt_read,
+					  IRQF_SHARED, dw->name,
+					  &dw->irq[i]);
+			if (err) {
+				dw->nr_irqs = i;
+				return err;
+			}
+
+			get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
+					   &dw->irq[i].msi);
+		}
+
+		dw->nr_irqs = i;
+	}
+
+	return err;
+}
+
+int dw_edma_probe(struct dw_edma_chip *chip)
+{
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	u32 wr_alloc = 0;
+	u32 rd_alloc = 0;
+	int i, err;
+
+	raw_spin_lock_init(&dw->lock);
+
+	/* Find out how many write channels are supported by hardware */
+	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
+	if (!dw->wr_ch_cnt)
+		return -EINVAL;
+
+	/* Find out how many read channels are supported by hardware */
+	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
+	if (!dw->rd_ch_cnt)
+		return -EINVAL;
+
+	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
+		 dw->wr_ch_cnt, dw->rd_ch_cnt);
+
+	/* Allocate channels */
+	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
+				sizeof(*dw->chan), GFP_KERNEL);
+	if (!dw->chan)
+		return -ENOMEM;
+
+	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+
+	/* Disable eDMA, only to establish the ideal initial conditions */
+	dw_edma_v0_core_off(dw);
+
+	/* Request IRQs */
+	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+	if (err)
+		return err;
+
+	/* Setup write channels */
+	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+	if (err)
+		goto err_irq_free;
+
+	/* Setup read channels */
+	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+	if (err)
+		goto err_irq_free;
+
+	/* Power management */
+	pm_runtime_enable(dev);
+
+	/* Turn debugfs on */
+	dw_edma_v0_core_debugfs_on(chip);
+
+	return 0;
+
+err_irq_free:
+	for (i = (dw->nr_irqs - 1); i >= 0; i--)
+		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+
+	dw->nr_irqs = 0;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dw_edma_probe);
+
+int dw_edma_remove(struct dw_edma_chip *chip)
+{
+	struct dw_edma_chan *chan, *_chan;
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	int i;
+
+	/* Disable eDMA */
+	dw_edma_v0_core_off(dw);
+
+	/* Free irqs */
+	for (i = (dw->nr_irqs - 1); i >= 0; i--)
+		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+
+	/* Power management */
+	pm_runtime_disable(dev);
+
+	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+				 vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
+
+	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
+				 vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
+
+	/* Deregister eDMA device */
+	dma_async_device_unregister(&dw->wr_edma);
+	dma_async_device_unregister(&dw->rd_edma);
+
+	/* Turn debugfs off */
+	dw_edma_v0_core_debugfs_off();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_edma_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
+MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6cc90cbc9dc244734fd763a9ed34829419c994a
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_CORE_H
+#define _DW_EDMA_CORE_H
+
+#include <linux/msi.h>
+#include <linux/dma/edma.h>
+
+#include "../virt-dma.h"
+
+#define EDMA_LL_SZ					24
+
+enum dw_edma_dir {
+	EDMA_DIR_WRITE = 0,
+	EDMA_DIR_READ
+};
+
+enum dw_edma_mode {
+	EDMA_MODE_LEGACY = 0,
+	EDMA_MODE_UNROLL
+};
+
+enum dw_edma_request {
+	EDMA_REQ_NONE = 0,
+	EDMA_REQ_STOP,
+	EDMA_REQ_PAUSE
+};
+
+enum dw_edma_status {
+	EDMA_ST_IDLE = 0,
+	EDMA_ST_PAUSE,
+	EDMA_ST_BUSY
+};
+
+struct dw_edma_chan;
+struct dw_edma_chunk;
+
+struct dw_edma_burst {
+	struct list_head		list;
+	u64				sar;
+	u64				dar;
+	u32				sz;
+};
+
+struct dw_edma_region {
+	phys_addr_t			paddr;
+	dma_addr_t			vaddr;
+	size_t				sz;
+};
+
+struct dw_edma_chunk {
+	struct list_head		list;
+	struct dw_edma_chan		*chan;
+	struct dw_edma_burst		*burst;
+
+	u32				bursts_alloc;
+
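+	/*
+	 * Cycle bit (CB): alternates between consecutive chunks so the
+	 * hardware can tell one linked list apart from the next.
+	 */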
+	u8				cb;
+	struct dw_edma_region		ll_region;	/* Linked list */
+};
+
+struct dw_edma_desc {
+	struct virt_dma_desc		vd;
+	struct dw_edma_chan		*chan;
+	struct dw_edma_chunk		*chunk;
+
+	u32				chunks_alloc;
+
+	u32				alloc_sz;
+	u32				xfer_sz;
+};
+
+struct dw_edma_chan {
+	struct virt_dma_chan		vc;
+	struct dw_edma_chip		*chip;
+	int				id;
+	enum dw_edma_dir		dir;
+
+	off_t				ll_off;
+	u32				ll_max;
+
+	off_t				dt_off;
+
+	struct msi_msg			msi;
+
+	enum dw_edma_request		request;
+	enum dw_edma_status		status;
+	u8				configured;
+
+	struct dma_slave_config		config;
+};
+
+struct dw_edma_irq {
+	struct msi_msg                  msi;
+	u32				wr_mask;
+	u32				rd_mask;
+	struct dw_edma			*dw;
+};
+
+struct dw_edma {
+	char				name[20];
+
+	struct dma_device		wr_edma;
+	u16				wr_ch_cnt;
+
+	struct dma_device		rd_edma;
+	u16				rd_ch_cnt;
+
+	struct dw_edma_region		rg_region;	/* Registers */
+	struct dw_edma_region		ll_region;	/* Linked list */
+	struct dw_edma_region		dt_region;	/* Data */
+
+	struct dw_edma_irq		*irq;
+	int				nr_irqs;
+
+	u32				version;
+	enum dw_edma_mode		mode;
+
+	struct dw_edma_chan		*chan;
+	const struct dw_edma_core_ops	*ops;
+
+	raw_spinlock_t			lock;		/* Only for legacy */
+};
+
+struct dw_edma_sg {
+	struct scatterlist		*sgl;
+	unsigned int			len;
+};
+
+struct dw_edma_cyclic {
+	dma_addr_t			paddr;
+	size_t				len;
+	size_t				cnt;
+};
+
+struct dw_edma_transfer {
+	struct dma_chan			*dchan;
+	union dw_edma_xfer {
+		struct dw_edma_sg	sg;
+		struct dw_edma_cyclic	cyclic;
+	} xfer;
+	enum dma_transfer_direction	direction;
+	unsigned long			flags;
+	bool				cyclic;
+};
+
+static inline
+struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
+{
+	return container_of(vc, struct dw_edma_chan, vc);
+}
+
+static inline
+struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
+{
+	return vc2dw_edma_chan(to_virt_chan(dchan));
+}
+
+#endif /* _DW_EDMA_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
new file mode 100644
index 0000000000000000000000000000000000000000..4c96e1c948f2be68489b264b69f488523f520995
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA PCIe driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/dma/edma.h>
+#include <linux/pci-epf.h>
+#include <linux/msi.h>
+
+#include "dw-edma-core.h"
+
+struct dw_edma_pcie_data {
+	/* eDMA registers location */
+	enum pci_barno			rg_bar;
+	off_t				rg_off;
+	size_t				rg_sz;
+	/* eDMA memory linked list location */
+	enum pci_barno			ll_bar;
+	off_t				ll_off;
+	size_t				ll_sz;
+	/* eDMA memory data location */
+	enum pci_barno			dt_bar;
+	off_t				dt_off;
+	size_t				dt_sz;
+	/* Other */
+	u32				version;
+	enum dw_edma_mode		mode;
+	u8				irqs;
+};
+
+static const struct dw_edma_pcie_data snps_edda_data = {
+	/* eDMA registers location */
+	.rg_bar				= BAR_0,
+	.rg_off				= 0x00001000,	/*  4 Kbytes */
+	.rg_sz				= 0x00002000,	/*  8 Kbytes */
+	/* eDMA memory linked list location */
+	.ll_bar				= BAR_2,
+	.ll_off				= 0x00000000,	/*  0 Kbytes */
+	.ll_sz				= 0x00800000,	/*  8 Mbytes */
+	/* eDMA memory data location */
+	.dt_bar				= BAR_2,
+	.dt_off				= 0x00800000,	/*  8 Mbytes */
+	.dt_sz				= 0x03800000,	/* 56 Mbytes */
+	/* Other */
+	.version			= 0,
+	.mode				= EDMA_MODE_UNROLL,
+	.irqs				= 1,
+};
+
+static int dw_edma_pcie_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *pid)
+{
+	const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+	struct device *dev = &pdev->dev;
+	struct dw_edma_chip *chip;
+	int err, nr_irqs;
+	struct dw_edma *dw;
+
+	/* Enable PCI device */
+	err = pcim_enable_device(pdev);
+	if (err) {
+		pci_err(pdev, "enabling device failed\n");
+		return err;
+	}
+
+	/* Mapping PCI BAR regions */
+	err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
+				       BIT(pdata->ll_bar) |
+				       BIT(pdata->dt_bar),
+				 pci_name(pdev));
+	if (err) {
+		pci_err(pdev, "eDMA BAR I/O remapping failed\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	/* DMA configuration */
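+	/* Try a 64-bit DMA mask first, fall back to 32 bits if unsupported */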
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			pci_err(pdev, "consistent DMA mask 64 set failed\n");
+			return err;
+		}
+	} else {
+		pci_err(pdev, "DMA mask 64 set failed\n");
+
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			pci_err(pdev, "DMA mask 32 set failed\n");
+			return err;
+		}
+
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			pci_err(pdev, "consistent DMA mask 32 set failed\n");
+			return err;
+		}
+	}
+
+	/* Data structure allocation */
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	/* IRQs allocation */
+	nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
+					PCI_IRQ_MSI | PCI_IRQ_MSIX);
+	if (nr_irqs < 1) {
+		pci_err(pdev, "failed to allocate IRQ vectors (ret=%d)\n",
+			nr_irqs);
+		return -EPERM;
+	}
+
+	/* Data structure initialization */
+	chip->dw = dw;
+	chip->dev = dev;
+	chip->id = pdev->devfn;
+	chip->irq = pdev->irq;
+
+	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+	dw->rg_region.vaddr += pdata->rg_off;
+	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
+	dw->rg_region.paddr += pdata->rg_off;
+	dw->rg_region.sz = pdata->rg_sz;
+
+	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+	dw->ll_region.vaddr += pdata->ll_off;
+	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
+	dw->ll_region.paddr += pdata->ll_off;
+	dw->ll_region.sz = pdata->ll_sz;
+
+	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+	dw->dt_region.vaddr += pdata->dt_off;
+	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
+	dw->dt_region.paddr += pdata->dt_off;
+	dw->dt_region.sz = pdata->dt_sz;
+
+	dw->version = pdata->version;
+	dw->mode = pdata->mode;
+	dw->nr_irqs = nr_irqs;
+
+	/* Debug info */
+	pci_dbg(pdev, "Version:\t%u\n", dw->version);
+
+	pci_dbg(pdev, "Mode:\t%s\n",
+		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
+
+	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
+		&dw->rg_region.vaddr, &dw->rg_region.paddr);
+
+	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
+		&dw->ll_region.vaddr, &dw->ll_region.paddr);
+
+	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
+		&dw->dt_region.vaddr, &dw->dt_region.paddr);
+
+	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+
+	/* Validate that MSI/MSI-X interrupts were actually enabled */
+	if (!pci_dev_msi_enabled(pdev)) {
+		pci_err(pdev, "enable interrupt failed\n");
+		return -EPERM;
+	}
+
+	dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+	if (!dw->irq)
+		return -ENOMEM;
+
+	/* Starting eDMA driver */
+	err = dw_edma_probe(chip);
+	if (err) {
+		pci_err(pdev, "eDMA probe failed\n");
+		return err;
+	}
+
+	/* Saving data structure reference */
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+}
+
+static void dw_edma_pcie_remove(struct pci_dev *pdev)
+{
+	struct dw_edma_chip *chip = pci_get_drvdata(pdev);
+	int err;
+
+	/* Stopping eDMA driver */
+	err = dw_edma_remove(chip);
+	if (err)
+		pci_warn(pdev, "can't remove device properly: %d\n", err);
+
+	/* Freeing IRQs */
+	pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id dw_edma_pcie_id_table[] = {
+	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
+
+static struct pci_driver dw_edma_pcie_driver = {
+	.name		= "dw-edma-pcie",
+	.id_table	= dw_edma_pcie_id_table,
+	.probe		= dw_edma_pcie_probe,
+	.remove		= dw_edma_pcie_remove,
+};
+
+module_pci_driver(dw_edma_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver");
+MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
new file mode 100644
index 0000000000000000000000000000000000000000..8a3180ed49a60940492d5c32f31518618e8ff97a
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include "dw-edma-core.h"
+#include "dw-edma-v0-core.h"
+#include "dw-edma-v0-regs.h"
+#include "dw-edma-v0-debugfs.h"
+
+enum dw_edma_control {
+	DW_EDMA_V0_CB					= BIT(0),
+	DW_EDMA_V0_TCB					= BIT(1),
+	DW_EDMA_V0_LLP					= BIT(2),
+	DW_EDMA_V0_LIE					= BIT(3),
+	DW_EDMA_V0_RIE					= BIT(4),
+	DW_EDMA_V0_CCS					= BIT(8),
+	DW_EDMA_V0_LLE					= BIT(9),
+};
+
+static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
+{
+	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+}
+
+#define SET(dw, name, value)				\
+	writel(value, &(__dw_regs(dw)->name))
+
+#define GET(dw, name)					\
+	readl(&(__dw_regs(dw)->name))
+
+#define SET_RW(dw, dir, name, value)			\
+	do {						\
+		if ((dir) == EDMA_DIR_WRITE)		\
+			SET(dw, wr_##name, value);	\
+		else					\
+			SET(dw, rd_##name, value);	\
+	} while (0)
+
+#define GET_RW(dw, dir, name)				\
+	((dir) == EDMA_DIR_WRITE			\
+	  ? GET(dw, wr_##name)				\
+	  : GET(dw, rd_##name))
+
+#define SET_BOTH(dw, name, value)			\
+	do {						\
+		SET(dw, wr_##name, value);		\
+		SET(dw, rd_##name, value);		\
+	} while (0)
+
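+/*
+ * In legacy mode all channels share one register window that is
+ * multiplexed through viewport_sel, so each channel access must first
+ * select the channel (serialized by dw->lock). In unroll mode every
+ * channel has its own register copy and is addressed directly.
+ */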
+static inline struct dw_edma_v0_ch_regs __iomem *
+__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
+{
+	if (dw->mode == EDMA_MODE_LEGACY)
+		return &(__dw_regs(dw)->type.legacy.ch);
+
+	if (dir == EDMA_DIR_WRITE)
+		return &__dw_regs(dw)->type.unroll.ch[ch].wr;
+
+	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
+}
+
+static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+			     u32 value, void __iomem *addr)
+{
+	if (dw->mode == EDMA_MODE_LEGACY) {
+		u32 viewport_sel;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&dw->lock, flags);
+
+		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+		if (dir == EDMA_DIR_READ)
+			viewport_sel |= BIT(31);
+
+		writel(viewport_sel,
+		       &(__dw_regs(dw)->type.legacy.viewport_sel));
+		writel(value, addr);
+
+		raw_spin_unlock_irqrestore(&dw->lock, flags);
+	} else {
+		writel(value, addr);
+	}
+}
+
+static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+			   const void __iomem *addr)
+{
+	u32 value;
+
+	if (dw->mode == EDMA_MODE_LEGACY) {
+		u32 viewport_sel;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&dw->lock, flags);
+
+		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+		if (dir == EDMA_DIR_READ)
+			viewport_sel |= BIT(31);
+
+		writel(viewport_sel,
+		       &(__dw_regs(dw)->type.legacy.viewport_sel));
+		value = readl(addr);
+
+		raw_spin_unlock_irqrestore(&dw->lock, flags);
+	} else {
+		value = readl(addr);
+	}
+
+	return value;
+}
+
+#define SET_CH(dw, dir, ch, name, value) \
+	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define GET_CH(dw, dir, ch, name) \
+	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define SET_LL(ll, value) \
+	writel(value, ll)
+
+/* eDMA management callbacks */
+void dw_edma_v0_core_off(struct dw_edma *dw)
+{
+	SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+	SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+	SET_BOTH(dw, engine_en, 0);
+}
+
+u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
+{
+	u32 num_ch;
+
+	if (dir == EDMA_DIR_WRITE)
+		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
+	else
+		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
+
+	if (num_ch > EDMA_V0_MAX_NR_CH)
+		num_ch = EDMA_V0_MAX_NR_CH;
+
+	return (u16)num_ch;
+}
+
+enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
+{
+	struct dw_edma *dw = chan->chip->dw;
+	u32 tmp;
+
+	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
+			GET_CH(dw, chan->dir, chan->id, ch_control1));
+
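+	/* HW status: 1 = channel busy, 3 = done; anything else is an error */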
+	if (tmp == 1)
+		return DMA_IN_PROGRESS;
+	else if (tmp == 3)
+		return DMA_COMPLETE;
+	else
+		return DMA_ERROR;
+}
+
+void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
+{
+	struct dw_edma *dw = chan->chip->dw;
+
+	SET_RW(dw, chan->dir, int_clear,
+	       FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
+}
+
+void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
+{
+	struct dw_edma *dw = chan->chip->dw;
+
+	SET_RW(dw, chan->dir, int_clear,
+	       FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
+}
+
+u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
+{
+	return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
+}
+
+u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
+{
+	return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
+}
+
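+/*
+ * Lay out one chunk as a hardware linked list: each burst becomes a
+ * data element, the last data element raises the done interrupts
+ * (LIE/RIE), and a trailing link element (LLP) points back to the
+ * start of the chunk's list memory.
+ */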
+static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_burst *child;
+	struct dw_edma_v0_lli *lli;
+	struct dw_edma_v0_llp *llp;
+	u32 control = 0, i = 0;
+	u64 sar, dar, addr;
+	int j;
+
+	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+
+	if (chunk->cb)
+		control = DW_EDMA_V0_CB;
+
+	j = chunk->bursts_alloc;
+	list_for_each_entry(child, &chunk->burst->list, list) {
+		j--;
+		if (!j)
+			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
+
+		/* Channel control */
+		SET_LL(&lli[i].control, control);
+		/* Transfer size */
+		SET_LL(&lli[i].transfer_size, child->sz);
+		/* SAR - low, high */
+		sar = cpu_to_le64(child->sar);
+		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
+		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+		/* DAR - low, high */
+		dar = cpu_to_le64(child->dar);
+		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
+		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+		i++;
+	}
+
+	llp = (struct dw_edma_v0_llp *)&lli[i];
+	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
+	if (!chunk->cb)
+		control |= DW_EDMA_V0_CB;
+
+	/* Channel control */
+	SET_LL(&llp->control, control);
+	/* Linked list - low, high */
+	addr = cpu_to_le64(chunk->ll_region.paddr);
+	SET_LL(&llp->llp_low, lower_32_bits(addr));
+	SET_LL(&llp->llp_high, upper_32_bits(addr));
+}
+
+void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+{
+	struct dw_edma_chan *chan = chunk->chan;
+	struct dw_edma *dw = chan->chip->dw;
+	u32 tmp;
+	u64 llp;
+
+	dw_edma_v0_core_write_chunk(chunk);
+
+	if (first) {
+		/* Enable engine */
+		SET_RW(dw, chan->dir, engine_en, BIT(0));
+		/* Interrupt unmask - done, abort */
+		tmp = GET_RW(dw, chan->dir, int_mask);
+		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
+		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
+		SET_RW(dw, chan->dir, int_mask, tmp);
+		/* Linked list error */
+		tmp = GET_RW(dw, chan->dir, linked_list_err_en);
+		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
+		SET_RW(dw, chan->dir, linked_list_err_en, tmp);
+		/* Channel control */
+		SET_CH(dw, chan->dir, chan->id, ch_control1,
+		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+		/* Linked list - low, high */
+		llp = cpu_to_le64(chunk->ll_region.paddr);
+		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
+		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+	}
+	/* Doorbell */
+	SET_RW(dw, chan->dir, doorbell,
+	       FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
+}
+
+int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
+{
+	struct dw_edma *dw = chan->chip->dw;
+	u32 tmp = 0;
+
+	/* MSI done addr - low, high */
+	SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
+	SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
+	/* MSI abort addr - low, high */
+	SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
+	SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
+	/* MSI data - low, high */
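+	/*
+	 * Each imwr data register serves a channel pair: the even
+	 * channel's MSI data occupies the low 16 bits and the odd
+	 * channel's the high 16 bits, hence the read-modify-write below.
+	 */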
+	switch (chan->id) {
+	case 0:
+	case 1:
+		tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
+		break;
+
+	case 2:
+	case 3:
+		tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
+		break;
+
+	case 4:
+	case 5:
+		tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
+		break;
+
+	case 6:
+	case 7:
+		tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
+		break;
+	}
+
+	if (chan->id & BIT(0)) {
+		/* Channel odd {1, 3, 5, 7} */
+		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
+		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
+				  chan->msi.data);
+	} else {
+		/* Channel even {0, 2, 4, 6} */
+		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
+		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
+				  chan->msi.data);
+	}
+
+	switch (chan->id) {
+	case 0:
+	case 1:
+		SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
+		break;
+
+	case 2:
+	case 3:
+		SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
+		break;
+
+	case 4:
+	case 5:
+		SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
+		break;
+
+	case 6:
+	case 7:
+		SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
+		break;
+	}
+
+	return 0;
+}
+
+/* eDMA debugfs callbacks */
+void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+{
+	dw_edma_v0_debugfs_on(chip);
+}
+
+void dw_edma_v0_core_debugfs_off(void)
+{
+	dw_edma_v0_debugfs_off();
+}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
new file mode 100644
index 0000000000000000000000000000000000000000..abae1527f1f9b41cde74afb0246ea2bead843cdd
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_V0_CORE_H
+#define _DW_EDMA_V0_CORE_H
+
+#include <linux/dma/edma.h>
+
+/* eDMA management callbacks */
+void dw_edma_v0_core_off(struct dw_edma *dw);
+u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir);
+enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan);
+void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan);
+void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan);
+u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir);
+u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir);
+void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
+int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
+/* eDMA debugfs callbacks */
+void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_off(void);
+
+#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..3226f528cc11395409ae503f45ce9816ead6d8bc
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/bitfield.h>
+
+#include "dw-edma-v0-debugfs.h"
+#include "dw-edma-v0-regs.h"
+#include "dw-edma-core.h"
+
+#define REGS_ADDR(name) \
+	((dma_addr_t *)&regs->name)
+#define REGISTER(name) \
+	{ #name, REGS_ADDR(name) }
+
+#define WR_REGISTER(name) \
+	{ #name, REGS_ADDR(wr_##name) }
+#define RD_REGISTER(name) \
+	{ #name, REGS_ADDR(rd_##name) }
+
+#define WR_REGISTER_LEGACY(name) \
+	{ #name, REGS_ADDR(type.legacy.wr_##name) }
+#define RD_REGISTER_LEGACY(name) \
+	{ #name, REGS_ADDR(type.legacy.rd_##name) }
+
+#define WR_REGISTER_UNROLL(name) \
+	{ #name, REGS_ADDR(type.unroll.wr_##name) }
+#define RD_REGISTER_UNROLL(name) \
+	{ #name, REGS_ADDR(type.unroll.rd_##name) }
+
+#define WRITE_STR				"write"
+#define READ_STR				"read"
+#define CHANNEL_STR				"channel"
+#define REGISTERS_STR				"registers"
+
+static struct dentry				*base_dir;
+static struct dw_edma				*dw;
+static struct dw_edma_v0_regs			*regs;
+
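+/*
+ * Register address window [start, end) of every write (index 0) and
+ * read (index 1) channel, used in legacy mode to pick the right
+ * viewport selection for a debugfs read.
+ */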
+static struct {
+	void					*start;
+	void					*end;
+} lim[2][EDMA_V0_MAX_NR_CH];
+
+struct debugfs_entries {
+	char					name[24];
+	dma_addr_t				*reg;
+};
+
+static int dw_edma_debugfs_u32_get(void *data, u64 *val)
+{
+	if (dw->mode == EDMA_MODE_LEGACY &&
+	    data >= (void *)&regs->type.legacy.ch) {
+		void *ptr = (void *)&regs->type.legacy.ch;
+		u32 viewport_sel = 0;
+		unsigned long flags;
+		u16 ch;
+
+		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
+			if (data >= lim[0][ch].start && data < lim[0][ch].end) {
+				ptr += (data - lim[0][ch].start);
+				goto legacy_sel_wr;
+			}
+
+		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
+			if (data >= lim[1][ch].start && data < lim[1][ch].end) {
+				ptr += (data - lim[1][ch].start);
+				goto legacy_sel_rd;
+			}
+
+		return 0;
+legacy_sel_rd:
+		viewport_sel = BIT(31);
+legacy_sel_wr:
+		viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+
+		raw_spin_lock_irqsave(&dw->lock, flags);
+
+		writel(viewport_sel, &regs->type.legacy.viewport_sel);
+		*val = readl(ptr);
+
+		raw_spin_unlock_irqrestore(&dw->lock, flags);
+	} else {
+		*val = readl(data);
+	}
+
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
+
+static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
+				       int nr_entries, struct dentry *dir)
+{
+	int i;
+
+	for (i = 0; i < nr_entries; i++) {
+		if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
+						entries[i].reg,	&fops_x32))
+			break;
+	}
+}
+
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+				    struct dentry *dir)
+{
+	int nr_entries;
+	const struct debugfs_entries debugfs_regs[] = {
+		REGISTER(ch_control1),
+		REGISTER(ch_control2),
+		REGISTER(transfer_size),
+		REGISTER(sar_low),
+		REGISTER(sar_high),
+		REGISTER(dar_low),
+		REGISTER(dar_high),
+		REGISTER(llp_low),
+		REGISTER(llp_high),
+	};
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+}
+
+static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		/* eDMA global registers */
+		WR_REGISTER(engine_en),
+		WR_REGISTER(doorbell),
+		WR_REGISTER(ch_arb_weight_low),
+		WR_REGISTER(ch_arb_weight_high),
+		/* eDMA interrupts registers */
+		WR_REGISTER(int_status),
+		WR_REGISTER(int_mask),
+		WR_REGISTER(int_clear),
+		WR_REGISTER(err_status),
+		WR_REGISTER(done_imwr_low),
+		WR_REGISTER(done_imwr_high),
+		WR_REGISTER(abort_imwr_low),
+		WR_REGISTER(abort_imwr_high),
+		WR_REGISTER(ch01_imwr_data),
+		WR_REGISTER(ch23_imwr_data),
+		WR_REGISTER(ch45_imwr_data),
+		WR_REGISTER(ch67_imwr_data),
+		WR_REGISTER(linked_list_err_en),
+	};
+	const struct debugfs_entries debugfs_unroll_regs[] = {
+		/* eDMA channel context grouping */
+		WR_REGISTER_UNROLL(engine_chgroup),
+		WR_REGISTER_UNROLL(engine_hshake_cnt_low),
+		WR_REGISTER_UNROLL(engine_hshake_cnt_high),
+		WR_REGISTER_UNROLL(ch0_pwr_en),
+		WR_REGISTER_UNROLL(ch1_pwr_en),
+		WR_REGISTER_UNROLL(ch2_pwr_en),
+		WR_REGISTER_UNROLL(ch3_pwr_en),
+		WR_REGISTER_UNROLL(ch4_pwr_en),
+		WR_REGISTER_UNROLL(ch5_pwr_en),
+		WR_REGISTER_UNROLL(ch6_pwr_en),
+		WR_REGISTER_UNROLL(ch7_pwr_en),
+	};
+	struct dentry *regs_dir, *ch_dir;
+	int nr_entries, i;
+	char name[16];
+
+	regs_dir = debugfs_create_dir(WRITE_STR, dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	if (dw->mode == EDMA_MODE_UNROLL) {
+		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
+		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
+					   regs_dir);
+	}
+
+	for (i = 0; i < dw->wr_ch_cnt; i++) {
+		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+		ch_dir = debugfs_create_dir(name, regs_dir);
+		if (!ch_dir)
+			return;
+
+		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
+
+		lim[0][i].start = &regs->type.unroll.ch[i].wr;
+		lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
+	}
+}
+
+static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		/* eDMA global registers */
+		RD_REGISTER(engine_en),
+		RD_REGISTER(doorbell),
+		RD_REGISTER(ch_arb_weight_low),
+		RD_REGISTER(ch_arb_weight_high),
+		/* eDMA interrupts registers */
+		RD_REGISTER(int_status),
+		RD_REGISTER(int_mask),
+		RD_REGISTER(int_clear),
+		RD_REGISTER(err_status_low),
+		RD_REGISTER(err_status_high),
+		RD_REGISTER(linked_list_err_en),
+		RD_REGISTER(done_imwr_low),
+		RD_REGISTER(done_imwr_high),
+		RD_REGISTER(abort_imwr_low),
+		RD_REGISTER(abort_imwr_high),
+		RD_REGISTER(ch01_imwr_data),
+		RD_REGISTER(ch23_imwr_data),
+		RD_REGISTER(ch45_imwr_data),
+		RD_REGISTER(ch67_imwr_data),
+	};
+	const struct debugfs_entries debugfs_unroll_regs[] = {
+		/* eDMA channel context grouping */
+		RD_REGISTER_UNROLL(engine_chgroup),
+		RD_REGISTER_UNROLL(engine_hshake_cnt_low),
+		RD_REGISTER_UNROLL(engine_hshake_cnt_high),
+		RD_REGISTER_UNROLL(ch0_pwr_en),
+		RD_REGISTER_UNROLL(ch1_pwr_en),
+		RD_REGISTER_UNROLL(ch2_pwr_en),
+		RD_REGISTER_UNROLL(ch3_pwr_en),
+		RD_REGISTER_UNROLL(ch4_pwr_en),
+		RD_REGISTER_UNROLL(ch5_pwr_en),
+		RD_REGISTER_UNROLL(ch6_pwr_en),
+		RD_REGISTER_UNROLL(ch7_pwr_en),
+	};
+	struct dentry *regs_dir, *ch_dir;
+	int nr_entries, i;
+	char name[16];
+
+	regs_dir = debugfs_create_dir(READ_STR, dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	if (dw->mode == EDMA_MODE_UNROLL) {
+		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
+		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
+					   regs_dir);
+	}
+
+	for (i = 0; i < dw->rd_ch_cnt; i++) {
+		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
+
+		ch_dir = debugfs_create_dir(name, regs_dir);
+		if (!ch_dir)
+			return;
+
+		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
+
+		lim[1][i].start = &regs->type.unroll.ch[i].rd;
+		lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
+	}
+}
+
+static void dw_edma_debugfs_regs(void)
+{
+	const struct debugfs_entries debugfs_regs[] = {
+		REGISTER(ctrl_data_arb_prior),
+		REGISTER(ctrl),
+	};
+	struct dentry *regs_dir;
+	int nr_entries;
+
+	regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
+	if (!regs_dir)
+		return;
+
+	nr_entries = ARRAY_SIZE(debugfs_regs);
+	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+
+	dw_edma_debugfs_regs_wr(regs_dir);
+	dw_edma_debugfs_regs_rd(regs_dir);
+}
+
+void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+{
+	dw = chip->dw;
+	if (!dw)
+		return;
+
+	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	if (!regs)
+		return;
+
+	base_dir = debugfs_create_dir(dw->name, NULL);
+	if (!base_dir)
+		return;
+
+	debugfs_create_u32("version", 0444, base_dir, &dw->version);
+	debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
+	debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
+	debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
+
+	dw_edma_debugfs_regs();
+}
+
+void dw_edma_v0_debugfs_off(void)
+{
+	debugfs_remove_recursive(base_dir);
+}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..5450a0a941938bfe2a75f6049ebdec45ac0f3f62
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_V0_DEBUG_FS_H
+#define _DW_EDMA_V0_DEBUG_FS_H
+
+#include <linux/dma/edma.h>
+
+#ifdef CONFIG_DEBUG_FS
+void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_off(void);
+#else
+static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+{
+}
+
+static inline void dw_edma_v0_debugfs_off(void)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _DW_EDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd64768845079060b2071a403b602b9d7bf914aa
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA v0 core
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_V0_REGS_H
+#define _DW_EDMA_V0_REGS_H
+
+#include <linux/dmaengine.h>
+
+#define EDMA_V0_MAX_NR_CH				8
+#define EDMA_V0_VIEWPORT_MASK				GENMASK(2, 0)
+#define EDMA_V0_DONE_INT_MASK				GENMASK(7, 0)
+#define EDMA_V0_ABORT_INT_MASK				GENMASK(23, 16)
+#define EDMA_V0_WRITE_CH_COUNT_MASK			GENMASK(3, 0)
+#define EDMA_V0_READ_CH_COUNT_MASK			GENMASK(19, 16)
+#define EDMA_V0_CH_STATUS_MASK				GENMASK(6, 5)
+#define EDMA_V0_DOORBELL_CH_MASK			GENMASK(2, 0)
+#define EDMA_V0_LINKED_LIST_ERR_MASK			GENMASK(7, 0)
+
+#define EDMA_V0_CH_ODD_MSI_DATA_MASK			GENMASK(31, 16)
+#define EDMA_V0_CH_EVEN_MSI_DATA_MASK			GENMASK(15, 0)
+
+struct dw_edma_v0_ch_regs {
+	u32 ch_control1;				/* 0x000 */
+	u32 ch_control2;				/* 0x004 */
+	u32 transfer_size;				/* 0x008 */
+	u32 sar_low;					/* 0x00c */
+	u32 sar_high;					/* 0x010 */
+	u32 dar_low;					/* 0x014 */
+	u32 dar_high;					/* 0x018 */
+	u32 llp_low;					/* 0x01c */
+	u32 llp_high;					/* 0x020 */
+};
+
+struct dw_edma_v0_ch {
+	struct dw_edma_v0_ch_regs wr;			/* 0x200 */
+	u32 padding_1[55];				/* [0x224..0x2fc] */
+	struct dw_edma_v0_ch_regs rd;			/* 0x300 */
+	u32 padding_2[55];				/* [0x324..0x3fc] */
+};
+
+struct dw_edma_v0_unroll {
+	u32 padding_1;					/* 0x0f8 */
+	u32 wr_engine_chgroup;				/* 0x100 */
+	u32 rd_engine_chgroup;				/* 0x104 */
+	u32 wr_engine_hshake_cnt_low;			/* 0x108 */
+	u32 wr_engine_hshake_cnt_high;			/* 0x10c */
+	u32 padding_2[2];				/* [0x110..0x114] */
+	u32 rd_engine_hshake_cnt_low;			/* 0x118 */
+	u32 rd_engine_hshake_cnt_high;			/* 0x11c */
+	u32 padding_3[2];				/* [0x120..0x124] */
+	u32 wr_ch0_pwr_en;				/* 0x128 */
+	u32 wr_ch1_pwr_en;				/* 0x12c */
+	u32 wr_ch2_pwr_en;				/* 0x130 */
+	u32 wr_ch3_pwr_en;				/* 0x134 */
+	u32 wr_ch4_pwr_en;				/* 0x138 */
+	u32 wr_ch5_pwr_en;				/* 0x13c */
+	u32 wr_ch6_pwr_en;				/* 0x140 */
+	u32 wr_ch7_pwr_en;				/* 0x144 */
+	u32 padding_4[8];				/* [0x148..0x164] */
+	u32 rd_ch0_pwr_en;				/* 0x168 */
+	u32 rd_ch1_pwr_en;				/* 0x16c */
+	u32 rd_ch2_pwr_en;				/* 0x170 */
+	u32 rd_ch3_pwr_en;				/* 0x174 */
+	u32 rd_ch4_pwr_en;				/* 0x178 */
+	u32 rd_ch5_pwr_en;				/* 0x17c */
+	u32 rd_ch6_pwr_en;				/* 0x180 */
+	u32 rd_ch7_pwr_en;				/* 0x184 */
+	u32 padding_5[30];				/* [0x188..0x1fc] */
+	struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH];	/* [0x200..0x1120] */
+};
+
+struct dw_edma_v0_legacy {
+	u32 viewport_sel;				/* 0x0f8 */
+	struct dw_edma_v0_ch_regs ch;			/* [0x100..0x120] */
+};
+
+struct dw_edma_v0_regs {
+	/* eDMA global registers */
+	u32 ctrl_data_arb_prior;			/* 0x000 */
+	u32 padding_1;					/* 0x004 */
+	u32 ctrl;					/* 0x008 */
+	u32 wr_engine_en;				/* 0x00c */
+	u32 wr_doorbell;				/* 0x010 */
+	u32 padding_2;					/* 0x014 */
+	u32 wr_ch_arb_weight_low;			/* 0x018 */
+	u32 wr_ch_arb_weight_high;			/* 0x01c */
+	u32 padding_3[3];				/* [0x020..0x028] */
+	u32 rd_engine_en;				/* 0x02c */
+	u32 rd_doorbell;				/* 0x030 */
+	u32 padding_4;					/* 0x034 */
+	u32 rd_ch_arb_weight_low;			/* 0x038 */
+	u32 rd_ch_arb_weight_high;			/* 0x03c */
+	u32 padding_5[3];				/* [0x040..0x048] */
+	/* eDMA interrupts registers */
+	u32 wr_int_status;				/* 0x04c */
+	u32 padding_6;					/* 0x050 */
+	u32 wr_int_mask;				/* 0x054 */
+	u32 wr_int_clear;				/* 0x058 */
+	u32 wr_err_status;				/* 0x05c */
+	u32 wr_done_imwr_low;				/* 0x060 */
+	u32 wr_done_imwr_high;				/* 0x064 */
+	u32 wr_abort_imwr_low;				/* 0x068 */
+	u32 wr_abort_imwr_high;				/* 0x06c */
+	u32 wr_ch01_imwr_data;				/* 0x070 */
+	u32 wr_ch23_imwr_data;				/* 0x074 */
+	u32 wr_ch45_imwr_data;				/* 0x078 */
+	u32 wr_ch67_imwr_data;				/* 0x07c */
+	u32 padding_7[4];				/* [0x080..0x08c] */
+	u32 wr_linked_list_err_en;			/* 0x090 */
+	u32 padding_8[3];				/* [0x094..0x09c] */
+	u32 rd_int_status;				/* 0x0a0 */
+	u32 padding_9;					/* 0x0a4 */
+	u32 rd_int_mask;				/* 0x0a8 */
+	u32 rd_int_clear;				/* 0x0ac */
+	u32 padding_10;					/* 0x0b0 */
+	u32 rd_err_status_low;				/* 0x0b4 */
+	u32 rd_err_status_high;				/* 0x0b8 */
+	u32 padding_11[2];				/* [0x0bc..0x0c0] */
+	u32 rd_linked_list_err_en;			/* 0x0c4 */
+	u32 padding_12;					/* 0x0c8 */
+	u32 rd_done_imwr_low;				/* 0x0cc */
+	u32 rd_done_imwr_high;				/* 0x0d0 */
+	u32 rd_abort_imwr_low;				/* 0x0d4 */
+	u32 rd_abort_imwr_high;				/* 0x0d8 */
+	u32 rd_ch01_imwr_data;				/* 0x0dc */
+	u32 rd_ch23_imwr_data;				/* 0x0e0 */
+	u32 rd_ch45_imwr_data;				/* 0x0e4 */
+	u32 rd_ch67_imwr_data;				/* 0x0e8 */
+	u32 padding_13[4];				/* [0x0ec..0x0f8] */
+	/* eDMA channel context grouping */
+	union dw_edma_v0_type {
+		struct dw_edma_v0_legacy legacy;	/* [0x0f8..0x120] */
+		struct dw_edma_v0_unroll unroll;	/* [0x0f8..0x1120] */
+	} type;
+};
+
+struct dw_edma_v0_lli {
+	u32 control;
+	u32 transfer_size;
+	u32 sar_low;
+	u32 sar_high;
+	u32 dar_low;
+	u32 dar_high;
+};
+
+struct dw_edma_v0_llp {
+	u32 control;
+	u32 reserved;
+	u32 llp_low;
+	u32 llp_high;
+};
+
+#endif /* _DW_EDMA_V0_REGS_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e79a75db0852672b151b444f0658c98cdf371999..8de87b15a9889e1976171cd0f7342e875d652fb2 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -15,10 +15,13 @@
 struct dw_dma_pci_data {
 	const struct dw_dma_platform_data *pdata;
 	int (*probe)(struct dw_dma_chip *chip);
+	int (*remove)(struct dw_dma_chip *chip);
+	struct dw_dma_chip *chip;
 };
 
 static const struct dw_dma_pci_data dw_pci_data = {
 	.probe = dw_dma_probe,
+	.remove = dw_dma_remove,
 };
 
 static const struct dw_dma_platform_data idma32_pdata = {
@@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = {
 static const struct dw_dma_pci_data idma32_pci_data = {
 	.pdata = &idma32_pdata,
 	.probe = idma32_dma_probe,
+	.remove = idma32_dma_remove,
 };
 
 static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
-	const struct dw_dma_pci_data *data = (void *)pid->driver_data;
+	const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
+	struct dw_dma_pci_data *data;
 	struct dw_dma_chip *chip;
 	int ret;
 
@@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (ret)
 		return ret;
 
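+	/*
+	 * pid->driver_data is const and shared between devices; make a
+	 * per-device copy so the chip pointer can be stored for remove()
+	 * and the PM callbacks.
+	 */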
+	data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
@@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	chip->irq = pdev->irq;
 	chip->pdata = data->pdata;
 
+	data->chip = chip;
+
 	ret = data->probe(chip);
 	if (ret)
 		return ret;
 
-	pci_set_drvdata(pdev, chip);
+	pci_set_drvdata(pdev, data);
 
 	return 0;
 }
 
 static void dw_pci_remove(struct pci_dev *pdev)
 {
-	struct dw_dma_chip *chip = pci_get_drvdata(pdev);
+	struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
+	struct dw_dma_chip *chip = data->chip;
 	int ret;
 
-	ret = dw_dma_remove(chip);
+	ret = data->remove(chip);
 	if (ret)
 		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
 }
@@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev)
 
 static int dw_pci_suspend_late(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+	struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+	struct dw_dma_chip *chip = data->chip;
 
 	return do_dw_dma_disable(chip);
 };
 
 static int dw_pci_resume_early(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+	struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+	struct dw_dma_chip *chip = data->chip;
 
 	return do_dw_dma_enable(chip);
 };
@@ -131,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
 	{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
 
+	/* Elkhart Lake iDMA 32-bit (OSE DMA) */
+	{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
+	{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
+	{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
+
 	/* Haswell */
 	{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
 
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 680b2a00a953221547383a082af38f413d063502..44d92c34dec31d4850566ca8d1809e623164e785 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
-	if (fsl_chan->edma->version == v1) {
+	if (fsl_chan->edma->drvdata->version == v1) {
 		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
 		edma_writeb(fsl_chan->edma, ch, regs->serq);
 	} else {
@@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
-	if (fsl_chan->edma->version == v1) {
+	if (fsl_chan->edma->drvdata->version == v1) {
 		edma_writeb(fsl_chan->edma, ch, regs->cerq);
 		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
 	} else {
@@ -77,22 +77,33 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 }
 EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
 
+static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
+			   u32 off, u32 slot, bool enable)
+{
+	u8 val8;
+
+	if (enable)
+		val8 = EDMAMUX_CHCFG_ENBL | slot;
+	else
+		val8 = EDMAMUX_CHCFG_DIS;
+
+	iowrite8(val8, addr + off);
+}
+
 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 			unsigned int slot, bool enable)
 {
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 	void __iomem *muxaddr;
 	unsigned int chans_per_mux, ch_off;
+	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
 
-	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
 	slot = EDMAMUX_CHCFG_SOURCE(slot);
 
-	if (enable)
-		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
-	else
-		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+	mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
 
@@ -647,28 +658,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
 	edma->regs.erql = edma->membase + EDMA_ERQ;
 	edma->regs.eeil = edma->membase + EDMA_EEI;
 
-	edma->regs.serq = edma->membase + ((edma->version == v1) ?
-			EDMA_SERQ : EDMA64_SERQ);
-	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
-			EDMA_CERQ : EDMA64_CERQ);
-	edma->regs.seei = edma->membase + ((edma->version == v1) ?
-			EDMA_SEEI : EDMA64_SEEI);
-	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
-			EDMA_CEEI : EDMA64_CEEI);
-	edma->regs.cint = edma->membase + ((edma->version == v1) ?
-			EDMA_CINT : EDMA64_CINT);
-	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
-			EDMA_CERR : EDMA64_CERR);
-	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
-			EDMA_SSRT : EDMA64_SSRT);
-	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
-			EDMA_CDNE : EDMA64_CDNE);
-	edma->regs.intl = edma->membase + ((edma->version == v1) ?
-			EDMA_INTR : EDMA64_INTL);
-	edma->regs.errl = edma->membase + ((edma->version == v1) ?
-			EDMA_ERR : EDMA64_ERRL);
-
-	if (edma->version == v2) {
+	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SERQ : EDMA_SERQ);
+	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERQ : EDMA_CERQ);
+	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SEEI : EDMA_SEEI);
+	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CEEI : EDMA_CEEI);
+	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CINT : EDMA_CINT);
+	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CERR : EDMA_CERR);
+	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_SSRT : EDMA_SSRT);
+	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_CDNE : EDMA_CDNE);
+	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_INTL : EDMA_INTR);
+	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
+			EDMA64_ERRL : EDMA_ERR);
+
+	if (edma->drvdata->version == v2) {
 		edma->regs.erqh = edma->membase + EDMA64_ERQH;
 		edma->regs.eeih = edma->membase + EDMA64_EEIH;
 		edma->regs.errh = edma->membase + EDMA64_ERRH;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index c53f76eeb4d3690f64a788ee8a656abfc798ed7a..4e175560292ca26ab3370ae2e88e96a3ca0bcdbb 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -7,6 +7,7 @@
 #define _FSL_EDMA_COMMON_H_
 
 #include <linux/dma-direction.h>
+#include <linux/platform_device.h>
 #include "virt-dma.h"
 
 #define EDMA_CR_EDBG		BIT(1)
@@ -140,17 +141,24 @@ enum edma_version {
 	v2, /* 64ch Coldfire */
 };
 
+struct fsl_edma_drvdata {
+	enum edma_version	version;
+	u32			dmamuxs;
+	int			(*setup_irq)(struct platform_device *pdev,
+					     struct fsl_edma_engine *fsl_edma);
+};
+
 struct fsl_edma_engine {
 	struct dma_device	dma_dev;
 	void __iomem		*membase;
 	void __iomem		*muxbase[DMAMUX_NR];
 	struct clk		*muxclk[DMAMUX_NR];
 	struct mutex		fsl_edma_mutex;
+	const struct fsl_edma_drvdata *drvdata;
 	u32			n_chans;
 	int			txirq;
 	int			errirq;
 	bool			big_endian;
-	enum edma_version	version;
 	struct edma_regs	regs;
 	struct fsl_edma_chan	chans[];
 };
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 0ddad3adb761f01755dbab3711f9ee218961ad28..fcbad6ae954a820774f994fbaf1c99d7130df005 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -92,7 +92,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
 	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
 	struct dma_chan *chan, *_chan;
 	struct fsl_edma_chan *fsl_chan;
-	unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
+	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
+	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
 
 	if (dma_spec->args_count != 2)
 		return NULL;
@@ -180,16 +181,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 		clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
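+/*
+ * Per-compatible driver data: register layout version, number of
+ * DMAMUX instances and the IRQ setup hook, selected via the OF match
+ * table below.
+ */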
+static struct fsl_edma_drvdata vf610_data = {
+	.version = v1,
+	.dmamuxs = DMAMUX_NR,
+	.setup_irq = fsl_edma_irq_init,
+};
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
 static int fsl_edma_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id =
+			of_match_device(fsl_edma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
 	struct fsl_edma_engine *fsl_edma;
+	const struct fsl_edma_drvdata *drvdata = NULL;
 	struct fsl_edma_chan *fsl_chan;
 	struct edma_regs *regs;
 	struct resource *res;
 	int len, chans;
 	int ret, i;
 
+	if (of_id)
+		drvdata = of_id->data;
+	if (!drvdata) {
+		dev_err(&pdev->dev, "unable to find driver data\n");
+		return -EINVAL;
+	}
+
 	ret = of_property_read_u32(np, "dma-channels", &chans);
 	if (ret) {
 		dev_err(&pdev->dev, "Can't get dma-channels.\n");
@@ -201,7 +224,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (!fsl_edma)
 		return -ENOMEM;
 
-	fsl_edma->version = v1;
+	fsl_edma->drvdata = drvdata;
 	fsl_edma->n_chans = chans;
 	mutex_init(&fsl_edma->fsl_edma_mutex);
 
@@ -213,7 +236,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma_setup_regs(fsl_edma);
 	regs = &fsl_edma->regs;
 
-	for (i = 0; i < DMAMUX_NR; i++) {
+	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
 		char clkname[32];
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
@@ -259,7 +282,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	}
 
 	edma_writel(fsl_edma, ~0, regs->intl);
-	ret = fsl_edma_irq_init(pdev, fsl_edma);
+	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
 	if (ret)
 		return ret;
 
@@ -291,7 +314,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA engine. (%d)\n", ret);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}
 
@@ -300,7 +323,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
 		dma_async_device_unregister(&fsl_edma->dma_dev);
-		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 		return ret;
 	}
 
@@ -319,7 +342,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
-	fsl_disable_clocks(fsl_edma, DMAMUX_NR);
+	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
 
 	return 0;
 }
@@ -378,12 +401,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = {
 	.resume_early   = fsl_edma_resume_early,
 };
 
-static const struct of_device_id fsl_edma_dt_ids[] = {
-	{ .compatible = "fsl,vf610-edma", },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
-
 static struct platform_driver fsl_edma_driver = {
 	.driver		= {
 		.name	= "fsl-edma",
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 60b062c3647b7c0149839533cc0e0e3732471fb8..8e341c0c13bcf5772e7a33eedb9fc6e045c46895 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -113,6 +113,7 @@
 /* Field definition for Descriptor offset */
 #define QDMA_CCDF_STATUS		20
 #define QDMA_CCDF_OFFSET		20
+#define QDMA_SDDF_CMD(x)		(((u64)(x)) << 32)
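+/* The command word occupies the upper 32 bits of the sdf/ddf data field */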
 
 /* Field definition for safe loop count*/
 #define FSL_QDMA_HALT_COUNT		1500
@@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
+	u32 cmd;
 	struct fsl_qdma_format *sdf, *ddf;
 	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
 
@@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
 	/* Descriptor Buffer */
-	sdf->data =
-		cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
-			    FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	ddf->data =
-		cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
-			    FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	ddf->data |=
-		cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	sdf->data = QDMA_SDDF_CMD(cmd);
+
+	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+	ddf->data = QDMA_SDDF_CMD(cmd);
 }
 
 /*
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 0c2610066ba9217c7647553501b02008baea9adc..025d8ad5a63c12c3e04993912b52d6374ca460c0 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -61,10 +61,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
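+	/*
+	 * Both the burst size and the transfer width must describe the
+	 * device side of the transfer: dst_* for MEM_TO_DEV and src_*
+	 * for DEV_TO_MEM.
+	 */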
 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	}
 
 	hsu_chan_disable(hsuc);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4ec84a633bd304b104888d63cc0eadac8d468616..a01f4b5d793c4ed591b6666110b7ed00e5bce8e6 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1934,16 +1934,11 @@ static int sdma_init(struct sdma_engine *sdma)
 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_engine *sdma = sdmac->sdma;
 	struct imx_dma_data *data = fn_param;
 
 	if (!imx_dma_is_general_purpose(chan))
 		return false;
 
-	/* return false if it's not the right device */
-	if (sdma->dev->of_node != data->of_node)
-		return false;
-
 	sdmac->data = *data;
 	chan->private = &sdmac->data;
 
@@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
 	 * be set to sdmac->event_id1.
 	 */
 	data.dma_request2 = 0;
-	data.of_node = ofdma->of_node;
 
-	return dma_request_channel(mask, sdma_filter_fn, &data);
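+	/*
+	 * __dma_request_channel() now does the of_node matching itself,
+	 * which is why the device check was removed from sdma_filter_fn().
+	 */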
+	return __dma_request_channel(&mask, sdma_filter_fn, &data,
+				     ofdma->of_node);
 }
 
 static int sdma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index 7de54b2fafdb5467a2678d8f198b947d0b51876d..e15bd15a9ef6ac572da0086c33bee79ba6251600 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
 		free_irq(irq, mcf_edma);
 }
 
+static struct fsl_edma_drvdata mcf_data = {
+	.version = v2,
+	.setup_irq = mcf_edma_irq_init,
+};
+
 static int mcf_edma_probe(struct platform_device *pdev)
 {
 	struct mcf_edma_platform_data *pdata;
@@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
 
 	mcf_edma->n_chans = chans;
 
-	/* Set up version for ColdFire edma */
-	mcf_edma->version = v2;
+	/* Set up drvdata for ColdFire edma */
+	mcf_edma->drvdata = &mcf_data;
 	mcf_edma->big_endian = 1;
 
 	if (!mcf_edma->n_chans) {
@@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
 	iowrite32(~0, regs->inth);
 	iowrite32(~0, regs->intl);
 
-	ret = mcf_edma_irq_init(pdev, mcf_edma);
+	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
 	if (ret)
 		return ret;
 
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
index 7411eb3d419e506a47321a98277fe53ad48036a0..1ad63ddc292dee4b0c4bb42e28a699e3133441e7 100644
--- a/drivers/dma/mediatek/Kconfig
+++ b/drivers/dma/mediatek/Kconfig
@@ -25,3 +25,14 @@ config MTK_CQDMA
 
 	  This controller provides the channels which is dedicated to
 	  memory-to-memory transfer to offload from CPU.
+
+config MTK_UART_APDMA
+	tristate "MediaTek SoCs APDMA support for UART"
+	depends on OF && SERIAL_8250_MT6577
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the UART DMA engine found on MediaTek SoCs. Enable
+	  this option if you want the MediaTek 8250 UART driver
+	  (SERIAL_8250_MT6577) to transfer data via DMA. This engine is
+	  only present on MediaTek SoCs.
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
index 13b144594510b2672a0b8d4498e977f37f322b17..5ba39a5edc130063a46ddf9565ce887d1e09f88f 100644
--- a/drivers/dma/mediatek/Makefile
+++ b/drivers/dma/mediatek/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o
 obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
 obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
new file mode 100644
index 0000000000000000000000000000000000000000..546995c2087637c8bfdce527ee9c739332ed9adf
--- /dev/null
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek UART APDMA driver.
+ *
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Long Cheng <long.cheng@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../virt-dma.h"
+
+/* The default number of virtual channels */
+#define MTK_UART_APDMA_NR_VCHANS	8
+
+#define VFF_EN_B		BIT(0)
+#define VFF_STOP_B		BIT(0)
+#define VFF_FLUSH_B		BIT(0)
+#define VFF_4G_EN_B		BIT(0)
+/* rx valid size >= vff threshold */
+#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
+/* tx left size >= vff threshold */
+#define VFF_TX_INT_EN_B		BIT(0)
+#define VFF_WARM_RST_B		BIT(0)
+#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
+#define VFF_TX_INT_CLR_B	0
+#define VFF_STOP_CLR_B		0
+#define VFF_EN_CLR_B		0
+#define VFF_INT_EN_CLR_B	0
+#define VFF_4G_SUPPORT_CLR_B	0
+
+/*
+ * Interrupt trigger level for tx.
+ * If the threshold is n (the full ring size), no polling is needed to
+ * start tx; otherwise VFF_FLUSH must be polled.
+ */
+#define VFF_TX_THRE(n)		(n)
+/* interrupt trigger level for rx */
+#define VFF_RX_THRE(n)		((n) * 3 / 4)
+
+#define VFF_RING_SIZE	0xffff
+/* invert this bit when wrap ring head again */
+#define VFF_RING_WRAP	0x10000
+
+#define VFF_INT_FLAG		0x00
+#define VFF_INT_EN		0x04
+#define VFF_EN			0x08
+#define VFF_RST			0x0c
+#define VFF_STOP		0x10
+#define VFF_FLUSH		0x14
+#define VFF_ADDR		0x1c
+#define VFF_LEN			0x24
+#define VFF_THRE		0x28
+#define VFF_WPT			0x2c
+#define VFF_RPT			0x30
+/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
+#define VFF_VALID_SIZE		0x3c
+/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
+#define VFF_LEFT_SIZE		0x40
+#define VFF_DEBUG_STATUS	0x50
+#define VFF_4G_SUPPORT		0x54
+
+struct mtk_uart_apdmadev {
+	struct dma_device ddev;
+	struct clk *clk;
+	bool support_33bits;
+	unsigned int dma_requests;
+};
+
+struct mtk_uart_apdma_desc {
+	struct virt_dma_desc vd;
+
+	dma_addr_t addr;
+	unsigned int avail_len;
+};
+
+struct mtk_chan {
+	struct virt_dma_chan vc;
+	struct dma_slave_config	cfg;
+	struct mtk_uart_apdma_desc *desc;
+	enum dma_transfer_direction dir;
+
+	void __iomem *base;
+	unsigned int irq;
+
+	unsigned int rx_status;
+};
+
+static inline struct mtk_uart_apdmadev *
+to_mtk_uart_apdma_dev(struct dma_device *d)
+{
+	return container_of(d, struct mtk_uart_apdmadev, ddev);
+}
+
+static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct mtk_chan, vc.chan);
+}
+
+static inline struct mtk_uart_apdma_desc *
+to_mtk_uart_apdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
+}
+
+static void mtk_uart_apdma_write(struct mtk_chan *c,
+			       unsigned int reg, unsigned int val)
+{
+	writel(val, c->base + reg);
+}
+
+static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
+{
+	return readl(c->base + reg);
+}
+
+static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct dma_chan *chan = vd->tx.chan;
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+
+	kfree(c->desc);
+}
+
+static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
+{
+	struct mtk_uart_apdmadev *mtkd =
+				to_mtk_uart_apdma_dev(c->vc.chan.device);
+	struct mtk_uart_apdma_desc *d = c->desc;
+	unsigned int wpt, vff_sz;
+
+	vff_sz = c->cfg.dst_port_window_size;
+	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
+		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
+		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
+		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
+		mtk_uart_apdma_write(c, VFF_WPT, 0);
+		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
+
+		if (mtkd->support_33bits)
+			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
+	}
+
+	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
+	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
+		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");
+
+	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
+		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
+		return;
+	}
+
+	wpt = mtk_uart_apdma_read(c, VFF_WPT);
+
+	wpt += c->desc->avail_len;
+	if ((wpt & VFF_RING_SIZE) == vff_sz)
+		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;
+
+	/* Let DMA start moving data */
+	mtk_uart_apdma_write(c, VFF_WPT, wpt);
+
+	/* HW auto set to 0 when left size >= threshold */
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
+	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
+		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
+}
+
+static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
+{
+	struct mtk_uart_apdmadev *mtkd =
+				to_mtk_uart_apdma_dev(c->vc.chan.device);
+	struct mtk_uart_apdma_desc *d = c->desc;
+	unsigned int vff_sz;
+
+	vff_sz = c->cfg.src_port_window_size;
+	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
+		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
+		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
+		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
+		mtk_uart_apdma_write(c, VFF_RPT, 0);
+		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
+
+		if (mtkd->support_33bits)
+			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
+	}
+
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
+	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
+	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
+		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
+}
+
+static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
+{
+	struct mtk_uart_apdma_desc *d = c->desc;
+
+	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+
+	list_del(&d->vd.node);
+	vchan_cookie_complete(&d->vd);
+}
+
+static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
+{
+	struct mtk_uart_apdma_desc *d = c->desc;
+	unsigned int len, wg, rg;
+	int cnt;
+
+	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
+
+	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
+		return;
+
+	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+
+	len = c->cfg.src_port_window_size;
+	rg = mtk_uart_apdma_read(c, VFF_RPT);
+	wg = mtk_uart_apdma_read(c, VFF_WPT);
+	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);
+
+	/*
+	 * The buffer is a ring buffer. If the wrap bits differ,
+	 * WPT has started the next cycle around the ring.
+	 */
+	if ((rg ^ wg) & VFF_RING_WRAP)
+		cnt += len;
+
+	c->rx_status = d->avail_len - cnt;
+	mtk_uart_apdma_write(c, VFF_RPT, wg);
+
+	list_del(&d->vd.node);
+	vchan_cookie_complete(&d->vd);
+}
+
+static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
+{
+	struct dma_chan *chan = (struct dma_chan *)dev_id;
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->dir == DMA_DEV_TO_MEM)
+		mtk_uart_apdma_rx_handler(c);
+	else if (c->dir == DMA_MEM_TO_DEV)
+		mtk_uart_apdma_tx_handler(c);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	unsigned int status;
+	int ret;
+
+	ret = pm_runtime_get_sync(mtkd->ddev.dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(chan->device->dev);
+		return ret;
+	}
+
+	mtk_uart_apdma_write(c, VFF_ADDR, 0);
+	mtk_uart_apdma_write(c, VFF_THRE, 0);
+	mtk_uart_apdma_write(c, VFF_LEN, 0);
+	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);
+
+	ret = readx_poll_timeout(readl, c->base + VFF_EN,
+			  status, !status, 10, 100);
+	if (ret)
+		return ret;
+
+	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
+			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
+	if (ret < 0) {
+		dev_err(chan->device->dev, "Can't request dma IRQ\n");
+		return -EINVAL;
+	}
+
+	if (mtkd->support_33bits)
+		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+
+	return ret;
+}
+
+static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+
+	free_irq(c->irq, chan);
+
+	tasklet_kill(&c->vc.task);
+
+	vchan_free_chan_resources(&c->vc);
+
+	pm_runtime_put_sync(mtkd->ddev.dev);
+}
+
+static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
+					 dma_cookie_t cookie,
+					 struct dma_tx_state *txstate)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (!txstate)
+		return ret;
+
+	dma_set_residue(txstate, c->rx_status);
+
+	return ret;
+}
+
+/*
+ * This function is called via dmaengine_prep_slave_single(), so sglen is 1.
+ * The 8250 UART uses one ring buffer and handles one sg at a time.
+ */
+static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
+	(struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sglen, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct mtk_uart_apdma_desc *d;
+
+	if (!is_slave_direction(dir) || sglen != 1)
+		return NULL;
+
+	/* Now allocate and setup the descriptor */
+	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->avail_len = sg_dma_len(sgl);
+	d->addr = sg_dma_address(sgl);
+	c->dir = dir;
+
+	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc)) {
+		vd = vchan_next_desc(&c->vc);
+		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
+
+		if (c->dir == DMA_DEV_TO_MEM)
+			mtk_uart_apdma_start_rx(c);
+		else if (c->dir == DMA_MEM_TO_DEV)
+			mtk_uart_apdma_start_tx(c);
+	}
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
+				   struct dma_slave_config *config)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+
+	memcpy(&c->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
+static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	unsigned long flags;
+	unsigned int status;
+	LIST_HEAD(head);
+	int ret;
+
+	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
+
+	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
+			  status, status != VFF_FLUSH_B, 10, 100);
+	if (ret)
+		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
+			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
+
+	/*
+	 * Stopping requires three steps:
+	 * 1. set STOP to 1
+	 * 2. wait for EN to become 0
+	 * 3. clear STOP back to 0
+	 */
+	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
+	ret = readx_poll_timeout(readl, c->base + VFF_EN,
+			  status, !status, 10, 100);
+	if (ret)
+		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
+			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));
+
+	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+
+	if (c->dir == DMA_DEV_TO_MEM)
+		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
+	else if (c->dir == DMA_MEM_TO_DEV)
+		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
+
+	synchronize_irq(c->irq);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	vchan_dma_desc_free_list(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+
+	synchronize_irq(c->irq);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return 0;
+}
+
+static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
+{
+	while (!list_empty(&mtkd->ddev.channels)) {
+		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
+			struct mtk_chan, vc.chan.device_node);
+
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+}
+
+static const struct of_device_id mtk_uart_apdma_match[] = {
+	{ .compatible = "mediatek,mt6577-uart-dma", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
+
+static int mtk_uart_apdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mtk_uart_apdmadev *mtkd;
+	int bit_mask = 32, rc;
+	struct resource *res;
+	struct mtk_chan *c;
+	unsigned int i;
+
+	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
+	if (!mtkd)
+		return -ENOMEM;
+
+	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mtkd->clk)) {
+		dev_err(&pdev->dev, "No clock specified\n");
+		rc = PTR_ERR(mtkd->clk);
+		return rc;
+	}
+
+	if (of_property_read_bool(np, "mediatek,dma-33bits"))
+		mtkd->support_33bits = true;
+
+	if (mtkd->support_33bits)
+		bit_mask = 33;
+
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
+	if (rc)
+		return rc;
+
+	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
+	mtkd->ddev.device_alloc_chan_resources =
+				mtk_uart_apdma_alloc_chan_resources;
+	mtkd->ddev.device_free_chan_resources =
+				mtk_uart_apdma_free_chan_resources;
+	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
+	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
+	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
+	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
+	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
+	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
+	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	mtkd->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&mtkd->ddev.channels);
+
+	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
+	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
+		dev_info(&pdev->dev,
+			 "dma-requests property missing, using %u\n",
+			 MTK_UART_APDMA_NR_VCHANS);
+	}
+
+	for (i = 0; i < mtkd->dma_requests; i++) {
+		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
+		if (!c) {
+			rc = -ENOMEM;
+			goto err_no_dma;
+		}
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			rc = -ENODEV;
+			goto err_no_dma;
+		}
+
+		c->base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(c->base)) {
+			rc = PTR_ERR(c->base);
+			goto err_no_dma;
+		}
+		c->vc.desc_free = mtk_uart_apdma_desc_free;
+		vchan_init(&c->vc, &mtkd->ddev);
+
+		rc = platform_get_irq(pdev, i);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
+			goto err_no_dma;
+		}
+		c->irq = rc;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+
+	rc = dma_async_device_register(&mtkd->ddev);
+	if (rc)
+		goto rpm_disable;
+
+	platform_set_drvdata(pdev, mtkd);
+
+	/* Device-tree DMA controller registration */
+	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
+	if (rc)
+		goto dma_remove;
+
+	return rc;
+
+dma_remove:
+	dma_async_device_unregister(&mtkd->ddev);
+rpm_disable:
+	pm_runtime_disable(&pdev->dev);
+err_no_dma:
+	mtk_uart_apdma_free(mtkd);
+	return rc;
+}
+
+static int mtk_uart_apdma_remove(struct platform_device *pdev)
+{
+	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+
+	mtk_uart_apdma_free(mtkd);
+
+	dma_async_device_unregister(&mtkd->ddev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_uart_apdma_suspend(struct device *dev)
+{
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	if (!pm_runtime_suspended(dev))
+		clk_disable_unprepare(mtkd->clk);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_resume(struct device *dev)
+{
+	int ret;
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	if (!pm_runtime_suspended(dev)) {
+		ret = clk_prepare_enable(mtkd->clk);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_uart_apdma_runtime_suspend(struct device *dev)
+{
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(mtkd->clk);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	ret = clk_prepare_enable(mtkd->clk);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
+	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
+			   mtk_uart_apdma_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_uart_apdma_driver = {
+	.probe	= mtk_uart_apdma_probe,
+	.remove	= mtk_uart_apdma_remove,
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.pm		= &mtk_uart_apdma_pm_ops,
+		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
+	},
+};
+
+module_platform_driver(mtk_uart_apdma_driver);
+
+MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
+MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
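
For reference, the VFF pointer arithmetic used in this driver can be restated
in isolation. This is a standalone sketch of the same logic, not driver code:
the WPT/RPT registers hold a 16-bit offset (masked by VFF_RING_SIZE) plus a
wrap flag in bit 16 (VFF_RING_WRAP).

    /* Bytes available between the two pointers, given raw WPT/RPT values. */
    static unsigned int vff_occupied(unsigned int wpt, unsigned int rpt,
                                     unsigned int vff_len)
    {
            int cnt = (wpt & VFF_RING_SIZE) - (rpt & VFF_RING_SIZE);

            /* Differing wrap bits mean WPT is one lap ahead of RPT. */
            if ((wpt ^ rpt) & VFF_RING_WRAP)
                    cnt += vff_len;

            return cnt;
    }

    /* Advance a pointer by len bytes, toggling the wrap flag at the end. */
    static unsigned int vff_advance(unsigned int wpt, unsigned int len,
                                    unsigned int vff_len)
    {
            wpt += len;
            if ((wpt & VFF_RING_SIZE) == vff_len)
                    wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

            return wpt;
    }
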
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 730a18d0c6d6ac3b710daf070ce22abb00b974f1..fea8608a781017b2431fb9ae08319db47d56ae6f 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -717,10 +717,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev)
 	if (mic_dma_dbg) {
 		mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
 							  mic_dma_dbg);
-		if (mic_dma_dev->dbg_dir)
-			debugfs_create_file("mic_dma_reg", 0444,
-					    mic_dma_dev->dbg_dir, mic_dma_dev,
-					    &mic_dma_reg_fops);
+		debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir,
+				    mic_dma_dev, &mic_dma_reg_fops);
 	}
 	return 0;
 }
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index bb3ccbf90a318d745fdc167598bf9da8a03475da..e7d1e12bf4643e3ae9a35e1cd9881a2505a1c34a 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -582,18 +582,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 }
 
 struct mmp_tdma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };
 
 static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
 {
 	struct mmp_tdma_filter_param *param = fn_param;
-	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
-	struct dma_device *pdma_device = tdmac->chan.device;
-
-	if (pdma_device->dev->of_node != param->of_node)
-		return false;
 
 	if (chan->chan_id != param->chan_id)
 		return false;
@@ -611,13 +605,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args_count != 1)
 		return NULL;
 
-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];
 
 	if (param.chan_id >= TDMA_CHANNEL_NUM)
 		return NULL;
 
-	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
+				     ofdma->of_node);
 }
 
 static const struct of_device_id mmp_tdma_dt_ids[] = {
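
This and the remaining __dma_request_channel() conversions in the series
follow one pattern: the device_node comparison moves out of each driver's
filter function and into the dmaengine core, which matches against the node
passed as the new fourth argument. A sketch of the resulting shape (the
names my_filter/my_filter_param are illustrative, not from this patch):

    struct my_filter_param {
            unsigned int chan_id;
    };

    static bool my_filter(struct dma_chan *chan, void *fn_param)
    {
            struct my_filter_param *param = fn_param;

            /* No of_node check needed here; the core performs it. */
            return chan->chan_id == param->chan_id;
    }

    /* In the OF xlate callback: */
    chan = __dma_request_channel(&mask, my_filter, &param, ofdma->of_node);
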
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 20a9cb7cb6d346bbbe0426c977670772df49a76a..3039bba0e4d559836179fe0f71941da428bd563c 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -719,7 +719,6 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 }
 
 struct mxs_dma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };
 
@@ -730,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_irq;
 
-	if (mxs_dma->dma_device.dev->of_node != param->of_node)
-		return false;
-
 	if (chan->chan_id != param->chan_id)
 		return false;
 
@@ -755,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args_count != 1)
 		return NULL;
 
-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];
 
 	if (param.chan_id >= mxs_dma->nr_channels)
 		return NULL;
 
-	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
+	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
+				     ofdma->of_node);
 }
 
 static int __init mxs_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 1e4d9ef2aea11f33126c45f2cb0d3515a76de065..c2d779daa4b51ac8d5373a1ccfa8185d3b1f0233 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -313,8 +313,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 	if (count != 1)
 		return NULL;
 
-	return dma_request_channel(info->dma_cap, info->filter_fn,
-			&dma_spec->args[0]);
+	return __dma_request_channel(&info->dma_cap, info->filter_fn,
+				     &dma_spec->args[0], dma_spec->np);
 }
 EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
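
Drivers that rely on of_dma_simple_xlate() get the node matching for free
after this change. A hedged sketch of the registration side, again with
illustrative names:

    static struct of_dma_filter_info my_info = {
            .filter_fn = my_filter,
    };

    /* In probe, after dma_async_device_register(): */
    dma_cap_zero(my_info.dma_cap);
    dma_cap_set(DMA_SLAVE, my_info.dma_cap);
    ret = of_dma_controller_register(pdev->dev.of_node,
                                     of_dma_simple_xlate, &my_info);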
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 56f9fabc99c46e9ae388d6cdc1d697d3d3e9a4ee..1163af2ba4a36fef2f313dd09236eee4039236de 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
 #include <linux/bug.h>
+#include <linux/reset.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -496,6 +497,9 @@ struct pl330_dmac {
 	unsigned int num_peripherals;
 	struct dma_pl330_chan *peripherals; /* keep at end */
 	int quirks;
+
+	struct reset_control	*rstc;
+	struct reset_control	*rstc_ocp;
 };
 
 static struct pl330_of_quirks {
@@ -3024,6 +3028,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	amba_set_drvdata(adev, pl330);
 
+	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
+	if (IS_ERR(pl330->rstc)) {
+		if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
+			dev_err(&adev->dev, "Failed to get reset!\n");
+		return PTR_ERR(pl330->rstc);
+	} else {
+		ret = reset_control_deassert(pl330->rstc);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
+			return ret;
+		}
+	}
+
+	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
+	if (IS_ERR(pl330->rstc_ocp)) {
+		if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
+			dev_err(&adev->dev, "Failed to get OCP reset!\n");
+		return PTR_ERR(pl330->rstc_ocp);
+	} else {
+		ret = reset_control_deassert(pl330->rstc_ocp);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
+			return ret;
+		}
+	}
+
 	for (i = 0; i < AMBA_NR_IRQS; i++) {
 		irq = adev->irq[i];
 		if (irq) {
@@ -3164,6 +3194,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 probe_err2:
 	pl330_del(pl330);
 
+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
+
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 	return ret;
 }
 
@@ -3202,6 +3237,11 @@ static int pl330_remove(struct amba_device *adev)
 
 	pl330_del(pl330);
 
+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
+
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 	return 0;
 }
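
The reset handling added above relies on the "optional" variant of the reset
API: devm_reset_control_get_optional() returns NULL rather than an error when
the device tree carries no matching entry, and the reset_control_*() calls
treat NULL as a no-op. A condensed sketch of the pattern:

    struct reset_control *rstc;
    int ret;

    rstc = devm_reset_control_get_optional(dev, "dma");
    if (IS_ERR(rstc))
            return PTR_ERR(rstc);           /* may be -EPROBE_DEFER */

    ret = reset_control_deassert(rstc);     /* no-op when rstc is NULL */
    if (ret)
            return ret;

Since reset_control_assert(NULL) is equally a no-op, the explicit NULL checks
on the teardown paths above are strictly defensive.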
 
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 468c234cb3bea405568d6909bdda571a86a0bd42..349fb312c8725678a2a1268a4371047dbcb2ed11 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -129,7 +129,6 @@ struct pxad_device {
 	spinlock_t			phy_lock;	/* Phy association */
 #ifdef CONFIG_DEBUG_FS
 	struct dentry			*dbgfs_root;
-	struct dentry			*dbgfs_state;
 	struct dentry			**dbgfs_chan;
 #endif
 };
@@ -323,31 +322,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
 					     int ch, struct dentry *chandir)
 {
 	char chan_name[11];
-	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
-	struct dentry *chan_reqs = NULL;
+	struct dentry *chan;
 	void *dt;
 
 	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
 	chan = debugfs_create_dir(chan_name, chandir);
 	dt = (void *)&pdev->phys[ch];
 
-	if (chan)
-		chan_state = debugfs_create_file("state", 0400, chan, dt,
-						 &chan_state_fops);
-	if (chan_state)
-		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
-						 &descriptors_fops);
-	if (chan_descr)
-		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
-						&requester_chan_fops);
-	if (!chan_reqs)
-		goto err_state;
+	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
+	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
+	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);
 
 	return chan;
-
-err_state:
-	debugfs_remove_recursive(chan);
-	return NULL;
 }
 
 static void pxad_init_debugfs(struct pxad_device *pdev)
@@ -355,40 +341,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev)
 	int i;
 	struct dentry *chandir;
 
-	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
-	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
-		goto err_root;
-
-	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
-						pdev, &state_fops);
-	if (!pdev->dbgfs_state)
-		goto err_state;
-
 	pdev->dbgfs_chan =
-		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
+		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
 			      GFP_KERNEL);
 	if (!pdev->dbgfs_chan)
-		goto err_alloc;
+		return;
+
+	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
+
+	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);
 
 	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
-	if (!chandir)
-		goto err_chandir;
 
-	for (i = 0; i < pdev->nr_chans; i++) {
+	for (i = 0; i < pdev->nr_chans; i++)
 		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
-		if (!pdev->dbgfs_chan[i])
-			goto err_chans;
-	}
-
-	return;
-err_chans:
-err_chandir:
-	kfree(pdev->dbgfs_chan);
-err_alloc:
-err_state:
-	debugfs_remove_recursive(pdev->dbgfs_root);
-err_root:
-	pr_err("pxad: debugfs is not available\n");
 }
 
 static void pxad_cleanup_debugfs(struct pxad_device *pdev)
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index f337e2789ddcf1534bb22b790b9fa4241996664f..f212466744f32f083de453737d22a0aaee39273a 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -93,8 +93,6 @@ struct hidma_chan {
 	 * It is used by the DMA complete notification to
 	 * locate the descriptor that initiated the transfer.
 	 */
-	struct dentry			*debugfs;
-	struct dentry			*stats;
 	struct hidma_dev		*dmadev;
 	struct hidma_desc		*running;
 
@@ -126,7 +124,6 @@ struct hidma_dev {
 	struct dma_device		ddev;
 
 	struct dentry			*debugfs;
-	struct dentry			*stats;
 
 	/* sysfs entry for the channel id */
 	struct device_attribute		*chid_attrs;
@@ -158,6 +155,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg);
 irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
 				u8 err_code);
-int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_init(struct hidma_dev *dmadev);
 void hidma_debug_uninit(struct hidma_dev *dmadev);
 #endif
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index 75b0691a670d7ae48534ceec69896ad230922008..ce87c7937a0e44e4403d56a58649f8796df7cafd 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -138,17 +138,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev)
 	debugfs_remove_recursive(dmadev->debugfs);
 }
 
-int hidma_debug_init(struct hidma_dev *dmadev)
+void hidma_debug_init(struct hidma_dev *dmadev)
 {
-	int rc = 0;
 	int chidx = 0;
 	struct list_head *position = NULL;
+	struct dentry *dir;
 
 	dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
-	if (!dmadev->debugfs) {
-		rc = -ENODEV;
-		return rc;
-	}
 
 	/* walk through the virtual channel list */
 	list_for_each(position, &dmadev->ddev.channels) {
@@ -157,32 +153,13 @@ int hidma_debug_init(struct hidma_dev *dmadev)
 		chan = list_entry(position, struct hidma_chan,
 				  chan.device_node);
 		sprintf(chan->dbg_name, "chan%d", chidx);
-		chan->debugfs = debugfs_create_dir(chan->dbg_name,
+		dir = debugfs_create_dir(chan->dbg_name,
 						   dmadev->debugfs);
-		if (!chan->debugfs) {
-			rc = -ENOMEM;
-			goto cleanup;
-		}
-		chan->stats = debugfs_create_file("stats", S_IRUGO,
-						  chan->debugfs, chan,
-						  &hidma_chan_fops);
-		if (!chan->stats) {
-			rc = -ENOMEM;
-			goto cleanup;
-		}
+		debugfs_create_file("stats", S_IRUGO, dir, chan,
+				    &hidma_chan_fops);
 		chidx++;
 	}
 
-	dmadev->stats = debugfs_create_file("stats", S_IRUGO,
-					    dmadev->debugfs, dmadev,
-					    &hidma_dma_fops);
-	if (!dmadev->stats) {
-		rc = -ENOMEM;
-		goto cleanup;
-	}
-
-	return 0;
-cleanup:
-	hidma_debug_uninit(dmadev);
-	return rc;
+	debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev,
+			    &hidma_dma_fops);
 }
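
The debugfs conversions in this series (mic_x100, pxa_dma, hidma) all lean on
the same property: the debugfs creation helpers accept an ERR_PTR or NULL
parent and fail gracefully, so callers no longer need to check their return
values. A minimal sketch, with illustrative names:

    struct dentry *dir;

    dir = debugfs_create_dir("mydrv", NULL);
    /* Safe even if dir is an ERR_PTR; the file is silently skipped. */
    debugfs_create_file("stats", 0444, dir, priv, &my_fops);

    /* On teardown: */
    debugfs_remove_recursive(dir);
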
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 4d6b02b3b1f18e6f3fe417bdb9b1d1571201045d..54d5d0369d3c8805f72c589506032f1a62097d2d 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,9 +47,3 @@ config RENESAS_USB_DMAC
 	help
 	  This driver supports the USB-DMA controller found in the Renesas
 	  SoCs.
-
-config SUDMAC
-	tristate "Renesas SUDMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 42110dd57a56fda87c4cb8a521b0b40f8bd7108c..112fbd22bb3fb984007a7b897c16932e4609c1cc 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
 
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
-obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 33ab1b607e2b0f5cc136bad35f17c006d3e6ac2b..9c41a4e425759fcca6235e80c4ff42bf9fcb6c4b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
 
 	/* Someone calling slave DMA on a generic channel? */
-	if (rchan->mid_rid < 0 || !sg_len) {
+	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
 		dev_warn(chan->device->dev,
 			 "%s: bad parameter: len=%d, id=%d\n",
 			 __func__, sg_len, rchan->mid_rid);
@@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
 	 * Forcing it to call dma_request_channel() and iterate through all
 	 * channels from all controllers is just pointless.
 	 */
-	if (chan->device->device_config != rcar_dmac_device_config ||
-	    dma_spec->np != chan->device->dev->of_node)
+	if (chan->device->device_config != rcar_dmac_device_config)
 		return false;
 
 	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
@@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
+	chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
+				     ofdma->of_node);
 	if (!chan)
 		return NULL;
 
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
deleted file mode 100644
index 30cc3553cb8baef1c2ccefe67e3ab9e73fd648db..0000000000000000000000000000000000000000
--- a/drivers/dma/sh/sudmac.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Renesas SUDMAC support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- *
- * based on drivers/dma/sh/shdma.c:
- * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
- * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/sudmac.h>
-
-struct sudmac_chan {
-	struct shdma_chan shdma_chan;
-	void __iomem *base;
-	char dev_id[16];	/* unique name per DMAC of channel */
-
-	u32 offset;		/* for CFG, BA, BBC, CA, CBC, DEN */
-	u32 cfg;
-	u32 dint_end_bit;
-};
-
-struct sudmac_device {
-	struct shdma_dev shdma_dev;
-	struct sudmac_pdata *pdata;
-	void __iomem *chan_reg;
-};
-
-struct sudmac_regs {
-	u32 base_addr;
-	u32 base_byte_count;
-};
-
-struct sudmac_desc {
-	struct sudmac_regs hw;
-	struct shdma_desc shdma_desc;
-};
-
-#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
-#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
-				 struct sudmac_device, shdma_dev.dma_dev)
-
-/* SUDMAC register */
-#define SUDMAC_CH0CFG		0x00
-#define SUDMAC_CH0BA		0x10
-#define SUDMAC_CH0BBC		0x18
-#define SUDMAC_CH0CA		0x20
-#define SUDMAC_CH0CBC		0x28
-#define SUDMAC_CH0DEN		0x30
-#define SUDMAC_DSTSCLR		0x38
-#define SUDMAC_DBUFCTRL		0x3C
-#define SUDMAC_DINTCTRL		0x40
-#define SUDMAC_DINTSTS		0x44
-#define SUDMAC_DINTSTSCLR	0x48
-#define SUDMAC_CH0SHCTRL	0x50
-
-/* Definitions for the sudmac_channel.config */
-#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */
-#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */
-#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */
-
-/* Definitions for the sudmac_channel.dint_end_bit */
-#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
-#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */
-
-#define SUDMAC_DRV_NAME "sudmac"
-
-static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
-{
-	iowrite32(data, sc->base + reg);
-}
-
-static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
-{
-	return ioread32(sc->base + reg);
-}
-
-static bool sudmac_is_busy(struct sudmac_chan *sc)
-{
-	u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
-
-	if (den)
-		return true; /* working */
-
-	return false; /* waiting */
-}
-
-static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
-			   struct shdma_desc *sdesc)
-{
-	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
-	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
-	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
-}
-
-static void sudmac_start(struct sudmac_chan *sc)
-{
-	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
-
-	sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
-	sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
-}
-
-static void sudmac_start_xfer(struct shdma_chan *schan,
-			      struct shdma_desc *sdesc)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	struct sudmac_desc *sd = to_desc(sdesc);
-
-	sudmac_set_reg(sc, &sd->hw, sdesc);
-	sudmac_start(sc);
-}
-
-static bool sudmac_channel_busy(struct shdma_chan *schan)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-
-	return sudmac_is_busy(sc);
-}
-
-static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
-{
-}
-
-static const struct sudmac_slave_config *sudmac_find_slave(
-	struct sudmac_chan *sc, int slave_id)
-{
-	struct sudmac_device *sdev = to_sdev(sc);
-	struct sudmac_pdata *pdata = sdev->pdata;
-	const struct sudmac_slave_config *cfg;
-	int i;
-
-	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
-		if (cfg->slave_id == slave_id)
-			return cfg;
-
-	return NULL;
-}
-
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
-			    dma_addr_t slave_addr, bool try)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
-
-	if (!cfg)
-		return -ENODEV;
-
-	return 0;
-}
-
-static inline void sudmac_dma_halt(struct sudmac_chan *sc)
-{
-	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
-
-	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
-	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
-	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
-}
-
-static int sudmac_desc_setup(struct shdma_chan *schan,
-			     struct shdma_desc *sdesc,
-			     dma_addr_t src, dma_addr_t dst, size_t *len)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	struct sudmac_desc *sd = to_desc(sdesc);
-
-	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
-		__func__, &src, &dst, *len);
-
-	if (*len > schan->max_xfer_len)
-		*len = schan->max_xfer_len;
-
-	if (dst)
-		sd->hw.base_addr = dst;
-	else if (src)
-		sd->hw.base_addr = src;
-	sd->hw.base_byte_count = *len;
-
-	return 0;
-}
-
-static void sudmac_halt(struct shdma_chan *schan)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-
-	sudmac_dma_halt(sc);
-}
-
-static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
-
-	if (!(dintsts & sc->dint_end_bit))
-		return false;
-
-	/* DMA stop */
-	sudmac_dma_halt(sc);
-
-	return true;
-}
-
-static size_t sudmac_get_partial(struct shdma_chan *schan,
-				 struct shdma_desc *sdesc)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	struct sudmac_desc *sd = to_desc(sdesc);
-	u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
-
-	return sd->hw.base_byte_count - current_byte_count;
-}
-
-static bool sudmac_desc_completed(struct shdma_chan *schan,
-				  struct shdma_desc *sdesc)
-{
-	struct sudmac_chan *sc = to_chan(schan);
-	struct sudmac_desc *sd = to_desc(sdesc);
-	u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
-
-	return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
-}
-
-static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
-			     unsigned long flags)
-{
-	struct shdma_dev *sdev = &su_dev->shdma_dev;
-	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
-	struct sudmac_chan *sc;
-	struct shdma_chan *schan;
-	int err;
-
-	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
-	if (!sc)
-		return -ENOMEM;
-
-	schan = &sc->shdma_chan;
-	schan->max_xfer_len = 64 * 1024 * 1024 - 1;
-
-	shdma_chan_probe(sdev, schan, id);
-
-	sc->base = su_dev->chan_reg;
-
-	/* get platform_data */
-	sc->offset = su_dev->pdata->channel->offset;
-	if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
-		sc->cfg |= SUDMAC_SENDBUFM;
-	if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
-		sc->cfg |= SUDMAC_RCVENDM;
-	sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
-
-	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
-		sc->dint_end_bit |= SUDMAC_CH0ENDE;
-	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
-		sc->dint_end_bit |= SUDMAC_CH1ENDE;
-
-	/* set up channel irq */
-	if (pdev->id >= 0)
-		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
-			 pdev->id, id);
-	else
-		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
-
-	err = shdma_request_irq(schan, irq, flags, sc->dev_id);
-	if (err) {
-		dev_err(sdev->dma_dev.dev,
-			"DMA channel %d request_irq failed %d\n", id, err);
-		goto err_no_irq;
-	}
-
-	return 0;
-
-err_no_irq:
-	/* remove from dmaengine device node */
-	shdma_chan_remove(schan);
-	return err;
-}
-
-static void sudmac_chan_remove(struct sudmac_device *su_dev)
-{
-	struct shdma_chan *schan;
-	int i;
-
-	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
-		BUG_ON(!schan);
-
-		shdma_chan_remove(schan);
-	}
-}
-
-static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
-{
-	/* SUDMAC doesn't need the address */
-	return 0;
-}
-
-static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
-{
-	return &((struct sudmac_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops sudmac_shdma_ops = {
-	.desc_completed = sudmac_desc_completed,
-	.halt_channel = sudmac_halt,
-	.channel_busy = sudmac_channel_busy,
-	.slave_addr = sudmac_slave_addr,
-	.desc_setup = sudmac_desc_setup,
-	.set_slave = sudmac_set_slave,
-	.setup_xfer = sudmac_setup_xfer,
-	.start_xfer = sudmac_start_xfer,
-	.embedded_desc = sudmac_embedded_desc,
-	.chan_irq = sudmac_chan_irq,
-	.get_partial = sudmac_get_partial,
-};
-
-static int sudmac_probe(struct platform_device *pdev)
-{
-	struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
-	int err, i;
-	struct sudmac_device *su_dev;
-	struct dma_device *dma_dev;
-	struct resource *chan, *irq_res;
-
-	/* get platform data */
-	if (!pdata)
-		return -ENODEV;
-
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!irq_res)
-		return -ENODEV;
-
-	err = -ENOMEM;
-	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
-			      GFP_KERNEL);
-	if (!su_dev)
-		return err;
-
-	dma_dev = &su_dev->shdma_dev.dma_dev;
-
-	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
-	if (IS_ERR(su_dev->chan_reg))
-		return PTR_ERR(su_dev->chan_reg);
-
-	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
-
-	su_dev->shdma_dev.ops = &sudmac_shdma_ops;
-	su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
-	err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
-	if (err < 0)
-		return err;
-
-	/* platform data */
-	su_dev->pdata = dev_get_platdata(&pdev->dev);
-
-	platform_set_drvdata(pdev, su_dev);
-
-	/* Create DMA Channel */
-	for (i = 0; i < pdata->channel_num; i++) {
-		err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
-		if (err)
-			goto chan_probe_err;
-	}
-
-	err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
-	if (err < 0)
-		goto chan_probe_err;
-
-	return err;
-
-chan_probe_err:
-	sudmac_chan_remove(su_dev);
-
-	shdma_cleanup(&su_dev->shdma_dev);
-
-	return err;
-}
-
-static int sudmac_remove(struct platform_device *pdev)
-{
-	struct sudmac_device *su_dev = platform_get_drvdata(pdev);
-	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
-
-	dma_async_device_unregister(dma_dev);
-	sudmac_chan_remove(su_dev);
-	shdma_cleanup(&su_dev->shdma_dev);
-
-	return 0;
-}
-
-static struct platform_driver sudmac_driver = {
-	.driver		= {
-		.name	= SUDMAC_DRV_NAME,
-	},
-	.probe		= sudmac_probe,
-	.remove		= sudmac_remove,
-};
-module_platform_driver(sudmac_driver);
-
-MODULE_AUTHOR("Yoshihiro Shimoda");
-MODULE_DESCRIPTION("Renesas SUDMAC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 59403f6d008a379b7f0688bfe787cd79bc9102c6..17063aaf51bce98b163e1a61f7430668a3d6e49d 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -57,7 +57,7 @@ struct usb_dmac_desc {
 	u32 residue;
 	struct list_head node;
 	dma_cookie_t done_cookie;
-	struct usb_dmac_sg sg[0];
+	struct usb_dmac_sg sg[];
 };
 
 #define to_usb_dmac_desc(vd)	container_of(vd, struct usb_dmac_desc, vd)
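
The sg[0] to sg[] change converts a GNU zero-length array into a C99 flexible
array member. The allocation size for such a structure is typically computed
with struct_size() from <linux/overflow.h>; a sketch (whether this driver uses
the helper is not shown in this hunk):

    struct usb_dmac_desc *desc;

    /* Header plus sg_len trailing struct usb_dmac_sg entries. */
    desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
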
@@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
 	struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
 	struct of_phandle_args *dma_spec = arg;
 
-	if (dma_spec->np != chan->device->dev->of_node)
-		return false;
-
 	/* USB-DMAC should be used with fixed usb controller's FIFO */
 	if (uchan->index != dma_spec->args[0])
 		return false;
@@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
+	chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
+				     ofdma->of_node);
 	if (!chan)
 		return NULL;
 
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index da41bab98f5bbd7b86dbfe7e711f137382b22386..ef4d109e718900178f5b7f1bbacd53d5feaa142d 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1365,7 +1365,6 @@ static int stm32_dma_probe(struct platform_device *pdev)
 
 	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
 		chan = &dmadev->chan[i];
-		chan->irq = platform_get_irq(pdev, i);
 		ret = platform_get_irq(pdev, i);
 		if (ret < 0)  {
 			if (ret != -EPROBE_DEFER)
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 715aad7a91928f4afc48d53c67d13365e3f38be7..b552949da14b20a3755e540bcc637dcaf661a75e 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -295,8 +295,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int stm32_dmamux_runtime_suspend(struct device *dev)
 {
-	struct platform_device *pdev =
-		container_of(dev, struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
 	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
 
 	clk_disable_unprepare(stm32_dmamux->clk);
@@ -306,8 +305,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev)
 
 static int stm32_dmamux_runtime_resume(struct device *dev)
 {
-	struct platform_device *pdev =
-		container_of(dev, struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
 	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
 	int ret;
 
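to_platform_device() is the same container_of() idiom, packaged by the driver
core:

    /* From <linux/platform_device.h>: */
    #define to_platform_device(x) container_of((x), struct platform_device, dev)
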
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index e8fcc69b1de964be21f534f06f125d4540ed12fa..ed5b68dcfe50c8bb9c6da0fbca820528d34f93dd 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -64,17 +64,20 @@
 #define DMA_CHAN_LLI_ADDR	0x08
 
 #define DMA_CHAN_CUR_CFG	0x0c
-#define DMA_CHAN_MAX_DRQ		0x1f
-#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & DMA_CHAN_MAX_DRQ)
-#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
-#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
+#define DMA_CHAN_MAX_DRQ_A31		0x1f
+#define DMA_CHAN_MAX_DRQ_H6		0x3f
+#define DMA_CHAN_CFG_SRC_DRQ_A31(x)	((x) & DMA_CHAN_MAX_DRQ_A31)
+#define DMA_CHAN_CFG_SRC_DRQ_H6(x)	((x) & DMA_CHAN_MAX_DRQ_H6)
+#define DMA_CHAN_CFG_SRC_MODE_A31(x)	(((x) & 0x1) << 5)
+#define DMA_CHAN_CFG_SRC_MODE_H6(x)	(((x) & 0x1) << 8)
 #define DMA_CHAN_CFG_SRC_BURST_A31(x)	(((x) & 0x3) << 7)
 #define DMA_CHAN_CFG_SRC_BURST_H3(x)	(((x) & 0x3) << 6)
 #define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)
 
-#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
-#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
-#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
+#define DMA_CHAN_CFG_DST_DRQ_A31(x)	(DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_DRQ_H6(x)	(DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16)
+#define DMA_CHAN_CFG_DST_MODE_A31(x)	(DMA_CHAN_CFG_SRC_MODE_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_MODE_H6(x)	(DMA_CHAN_CFG_SRC_MODE_H6(x) << 16)
 #define DMA_CHAN_CFG_DST_BURST_A31(x)	(DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
 #define DMA_CHAN_CFG_DST_BURST_H3(x)	(DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
 #define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
@@ -94,6 +97,8 @@
 #define LLI_LAST_ITEM	0xfffff800
 #define NORMAL_WAIT	8
 #define DRQ_SDRAM	1
+#define LINEAR_MODE     0
+#define IO_MODE         1
 
 /* forward declaration */
 struct sun6i_dma_dev;
@@ -121,10 +126,13 @@ struct sun6i_dma_config {
 	 */
 	void (*clock_autogate_enable)(struct sun6i_dma_dev *);
 	void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
+	void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq);
+	void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode);
 	u32 src_burst_lengths;
 	u32 dst_burst_lengths;
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
+	bool has_mbus_clk;
 };
 
 /*
@@ -178,6 +186,7 @@ struct sun6i_dma_dev {
 	struct dma_device	slave;
 	void __iomem		*base;
 	struct clk		*clk;
+	struct clk		*clk_mbus;
 	int			irq;
 	spinlock_t		lock;
 	struct reset_control	*rstc;
@@ -305,6 +314,30 @@ static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
 		  DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
 }
 
+static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) |
+		  DMA_CHAN_CFG_DST_DRQ_A31(dst_drq);
+}
+
+static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) |
+		  DMA_CHAN_CFG_DST_DRQ_H6(dst_drq);
+}
+
+static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) |
+		  DMA_CHAN_CFG_DST_MODE_A31(dst_mode);
+}
+
+static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode)
+{
+	*p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) |
+		  DMA_CHAN_CFG_DST_MODE_H6(dst_mode);
+}
+
 static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
 {
 	struct sun6i_desc *txd = pchan->desc;
@@ -628,14 +661,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 
 	burst = convert_burst(8);
 	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-		DMA_CHAN_CFG_DST_LINEAR_MODE |
-		DMA_CHAN_CFG_SRC_LINEAR_MODE |
-		DMA_CHAN_CFG_SRC_WIDTH(width) |
+	v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) |
 		DMA_CHAN_CFG_DST_WIDTH(width);
 
 	sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);
+	sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM);
+	sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE);
 
 	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
 
@@ -687,11 +718,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 		if (dir == DMA_MEM_TO_DEV) {
 			v_lli->src = sg_dma_address(sg);
 			v_lli->dst = sconfig->dst_addr;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_IO_MODE |
-				DMA_CHAN_CFG_SRC_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_DST_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
+			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
 
 			dev_dbg(chan2dev(chan),
 				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
@@ -702,11 +731,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
 		} else {
 			v_lli->src = sconfig->src_addr;
 			v_lli->dst = sg_dma_address(sg);
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_IO_MODE |
-				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
+			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
 
 			dev_dbg(chan2dev(chan),
 				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
@@ -772,19 +799,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
 		if (dir == DMA_MEM_TO_DEV) {
 			v_lli->src = buf_addr + period_len * i;
 			v_lli->dst = sconfig->dst_addr;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_IO_MODE |
-				DMA_CHAN_CFG_SRC_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_DST_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
+			sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
 		} else {
 			v_lli->src = sconfig->src_addr;
 			v_lli->dst = buf_addr + period_len * i;
-			v_lli->cfg = lli_cfg |
-				DMA_CHAN_CFG_DST_LINEAR_MODE |
-				DMA_CHAN_CFG_SRC_IO_MODE |
-				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
-				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+			v_lli->cfg = lli_cfg;
+			sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
+			sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
 		}
 
 		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
@@ -1049,6 +1072,8 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = {
 	.nr_max_requests = 30,
 	.nr_max_vchans   = 53,
 	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(8),
 	.dst_burst_lengths = BIT(1) | BIT(8),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1070,6 +1095,8 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
 	.nr_max_vchans   = 37,
 	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
 	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(8),
 	.dst_burst_lengths = BIT(1) | BIT(8),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1086,6 +1113,8 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
 	.nr_max_vchans   = 39,
 	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
 	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(8),
 	.dst_burst_lengths = BIT(1) | BIT(8),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1109,6 +1138,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
 	.nr_max_vchans   = 34,
 	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
 	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
 	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1128,6 +1159,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
 static struct sun6i_dma_config sun50i_a64_dma_cfg = {
 	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
 	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
 	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1140,6 +1173,28 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
 			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
 };
 
+/*
+ * The H6 binding takes the number of DMA channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_h6_dma_cfg = {
+	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+	.set_burst_length = sun6i_set_burst_length_h3,
+	.set_drq          = sun6i_set_drq_h6,
+	.set_mode         = sun6i_set_mode_h6,
+	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+	.has_mbus_clk = true,
+};
+
 /*
  * The V3s have only 8 physical channels, a maximum DRQ port id of 23,
  * and a total of 24 usable source and destination endpoints.
@@ -1151,6 +1206,8 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
 	.nr_max_vchans   = 24,
 	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
 	.set_burst_length = sun6i_set_burst_length_a31,
+	.set_drq          = sun6i_set_drq_a31,
+	.set_mode         = sun6i_set_mode_a31,
 	.src_burst_lengths = BIT(1) | BIT(8),
 	.dst_burst_lengths = BIT(1) | BIT(8),
 	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1168,6 +1225,7 @@ static const struct of_device_id sun6i_dma_match[] = {
 	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
 	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
 	{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
+	{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun6i_dma_match);
@@ -1204,6 +1262,14 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(sdc->clk);
 	}
 
+	if (sdc->cfg->has_mbus_clk) {
+		sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus");
+		if (IS_ERR(sdc->clk_mbus)) {
+			dev_err(&pdev->dev, "No mbus clock specified\n");
+			return PTR_ERR(sdc->clk_mbus);
+		}
+	}
+
 	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
 	if (IS_ERR(sdc->rstc)) {
 		dev_err(&pdev->dev, "No reset controller specified\n");
@@ -1258,8 +1324,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
 	if (ret && !sdc->max_request) {
 		dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
-			 DMA_CHAN_MAX_DRQ);
-		sdc->max_request = DMA_CHAN_MAX_DRQ;
+			 DMA_CHAN_MAX_DRQ_A31);
+		sdc->max_request = DMA_CHAN_MAX_DRQ_A31;
 	}
 
 	/*
@@ -1308,11 +1374,19 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 		goto err_reset_assert;
 	}
 
+	if (sdc->cfg->has_mbus_clk) {
+		ret = clk_prepare_enable(sdc->clk_mbus);
+		if (ret) {
+			dev_err(&pdev->dev, "Couldn't enable mbus clock\n");
+			goto err_clk_disable;
+		}
+	}
+
 	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
 			       dev_name(&pdev->dev), sdc);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot request IRQ\n");
-		goto err_clk_disable;
+		goto err_mbus_clk_disable;
 	}
 
 	ret = dma_async_device_register(&sdc->slave);
@@ -1337,6 +1411,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	dma_async_device_unregister(&sdc->slave);
 err_irq_disable:
 	sun6i_kill_tasklet(sdc);
+err_mbus_clk_disable:
+	clk_disable_unprepare(sdc->clk_mbus);
 err_clk_disable:
 	clk_disable_unprepare(sdc->clk);
 err_reset_assert:
@@ -1355,6 +1431,7 @@ static int sun6i_dma_remove(struct platform_device *pdev)
 
 	sun6i_kill_tasklet(sdc);
 
+	clk_disable_unprepare(sdc->clk_mbus);
 	clk_disable_unprepare(sdc->clk);
 	reset_control_assert(sdc->rstc);
 
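The sun6i refactoring replaces per-SoC bitfield macros with callbacks so the
H6 register layout (6-bit DRQ fields, mode bits at 8/24) can coexist with the
A31 layout (5-bit DRQ fields, mode bits at 5/21). Every prep routine now
composes the channel configuration word through the hooks; schematically, for
a MEM_TO_DEV transfer:

    u32 cfg = 0;

    cfg |= DMA_CHAN_CFG_SRC_WIDTH(width) | DMA_CHAN_CFG_DST_WIDTH(width);
    sdev->cfg->set_burst_length(&cfg, burst, burst);
    /* Source: SDRAM DRQ, linear mode; destination: peripheral port, IO mode. */
    sdev->cfg->set_drq(&cfg, DRQ_SDRAM, vchan->port);
    sdev->cfg->set_mode(&cfg, LINEAR_MODE, IO_MODE);
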
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index ef317c90fbe1ed813845a3788dad3e5a1d5ae96f..79e9593815f1217eb5d1887ba144c856834f51a6 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
@@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
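With this change the Tegra APB DMA driver rejects descriptors prepared without
DMA_PREP_INTERRUPT, since it depends on the end-of-chain interrupt to advance
its queue. Clients are therefore expected to pass the flag; an illustrative
call:

    desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!desc)
            return -EIO;
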
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index bb5390847257cc651eb4c1be62e1522c404e3ee0..ec4adf4260a098488fff4471e82c2f1a89b90c54 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -98,7 +98,7 @@ static void vchan_complete(unsigned long arg)
 	}
 	spin_unlock_irq(&vc->lock);
 
-	dmaengine_desc_callback_invoke(&cb, NULL);
+	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
 
 	list_for_each_entry_safe(vd, _vd, &head, node) {
 		dmaengine_desc_get_callback(&vd->tx, &cb);
@@ -106,7 +106,10 @@ static void vchan_complete(unsigned long arg)
 		list_del(&vd->node);
-		vchan_vdesc_fini(vd);
 
-		dmaengine_desc_callback_invoke(&cb, NULL);
+		/* Invoke the callback before freeing the descriptor:
+		 * vchan_vdesc_fini() may free vd, and the result the
+		 * callback consumes lives inside vd (vd->tx_result). */
+		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+		vchan_vdesc_fini(vd);
 	}
 }
 
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 23342ca23d4a97135273394f666e4531d65fd56a..ab158bac03a7c81ae24cf8aaca5a599b3b223177 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -14,6 +14,7 @@
 
 struct virt_dma_desc {
 	struct dma_async_tx_descriptor tx;
+	struct dmaengine_result tx_result;
 	/* protected by vc.lock */
 	struct list_head node;
 };
@@ -62,6 +63,9 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	vd->tx.tx_submit = vchan_tx_submit;
 	vd->tx.desc_free = vchan_tx_desc_free;
 
+	vd->tx_result.result = DMA_TRANS_NOERROR;
+	vd->tx_result.residue = 0;
+
 	spin_lock_irqsave(&vc->lock, flags);
 	list_add_tail(&vd->node, &vc->desc_allocated);
 	spin_unlock_irqrestore(&vc->lock, flags);
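Every descriptor now starts out reporting success with no residue, so
a driver only has to touch vd->tx_result when something goes wrong. A
minimal sketch of an error-IRQ path (bytes_left is a hypothetical
residue computed by the driver; vc->lock must be held, as usual for
vchan_cookie_complete()):

	vd->tx_result.result = DMA_TRANS_READ_FAILED;
	vd->tx_result.residue = bytes_left;
	vchan_cookie_complete(vd);

vchan_complete() then hands &vd->tx_result to any callback_result the
client installed, instead of the previous NULL.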
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 36c092349b5b7b45822b274708518fa9cdaad19b..e7dc3c4dc8e077d855410055539a2b88a16d9bfb 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1095,7 +1095,7 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan)
 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
+	struct xilinx_dma_tx_descriptor *desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *segment, *last = NULL;
 	int i = 0;
@@ -1112,8 +1112,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_dma_tx_descriptor, node);
-	tail_desc = list_last_entry(&chan->pending_list,
-				    struct xilinx_dma_tx_descriptor, node);
 
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 6765f10837acb8de119a4e9c1bd18ed8e1bea01a..6e208a060a585d2926a97fad210f5acac7baa24d 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -793,7 +793,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
 	  .driver_data = (kernel_ulong_t)&am654_data
 	},
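PCI_DEVICE_DATA() token-pastes the vendor and device macro names, so
this entry picks up PCI_VENDOR_ID_SYNOPSYS and the new
PCI_DEVICE_ID_SYNOPSYS_EDDA definition added to pci_ids.h below. It is
roughly equivalent to the open-coded form it replaces, with an
explicit driver_data:

	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_DEVICE_ID_SYNOPSYS_EDDA),
	  .driver_data = (kernel_ulong_t)NULL },
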
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 4bb16e9bc29781f297a77b8b1b1f5387fe8941d4..d4aef9c4a94cd11cefc2a0aaf6045dd3ffb2fbc8 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -99,7 +99,7 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse)
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL);
+	fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL);
 	if (!fuse->apbdma.chan)
 		return -EPROBE_DEFER;
 
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
new file mode 100644
index 0000000000000000000000000000000000000000..cab6e18773dadd2149f6310e0c87316137419d24
--- /dev/null
+++ b/include/linux/dma/edma.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+struct dw_edma;
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev:		 struct device of the eDMA controller
+ * @id:			 instance ID
+ * @irq:		 irq line
+ * @dw:			 struct dw_edma that is filled by dw_edma_probe()
+ */
+struct dw_edma_chip {
+	struct device		*dev;
+	int			id;
+	int			irq;
+	struct dw_edma		*dw;
+};
+
+/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+	return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+	return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */
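A glue driver (a PCIe controller driver, for instance) is expected to
allocate and fill the chip descriptor, then hand it to the core. A
minimal probe-time sketch, assuming "dev" and "irq" are supplied by
the caller:

	struct dw_edma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->id = 0;
	chip->irq = irq;

	err = dw_edma_probe(chip);	/* fills chip->dw on success */
	if (err)
		return err;

With CONFIG_DW_EDMA disabled, the stubs let the same call sites fail
gracefully with -ENODEV instead of needing #ifdefs in the glue driver.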
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c952f987ee573552c46c067450accdd7afa6d383..8fcdee1c0cf9559fc99671bdfb64782acd08619f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-					dma_filter_fn fn, void *fn_param);
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 
 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
@@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void)
 {
 }
 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-					      dma_filter_fn fn, void *fn_param)
+						     dma_filter_fn fn,
+						     void *fn_param,
+						     struct device_node *np)
 {
 	return NULL;
 }
@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_channel(mask, x, y) \
+	__dma_request_channel(&(mask), x, y, NULL)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
 	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
 
@@ -1417,6 +1421,6 @@ static inline struct dma_chan
 	if (!fn || !fn_param)
 		return NULL;
 
-	return __dma_request_channel(mask, fn, fn_param);
+	return __dma_request_channel(mask, fn, fn_param, NULL);
 }
 #endif /* DMAENGINE_H */
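The extra device_node argument lets OF-aware callers restrict the
search to a DMA controller matching a given node, while the
dma_request_channel() macro keeps legacy call sites unchanged by
passing NULL. A sketch of both call styles (filter_fn, filter_param
and np are placeholders):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Legacy style: any controller may satisfy the request. */
	chan = dma_request_channel(mask, filter_fn, filter_param);

	/* OF-aware style: only channels whose controller matches np. */
	chan = __dma_request_channel(&mask, filter_fn, filter_param, np);
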
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fc95d5c95bb80b21bb783dce7a3eb88dc462dd8
--- /dev/null
+++ b/include/linux/fpga/adi-axi-common.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION			0x0000
+
+#define ADI_AXI_PCORE_VER(major, minor, patch)	\
+	(((major) << 16) | ((minor) << 8) | (patch))
+
+#endif /* ADI_AXI_COMMON_H_ */
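ADI IP cores encode their version as major.minor.patch, with the patch
level conventionally a letter. A hypothetical probe-time check ("base"
is an ioremapped register window; the required version is illustrative):

	u32 version = ioread32(base + ADI_AXI_REG_VERSION);

	if (version < ADI_AXI_PCORE_VER(4, 0, 'a'))
		return -ENODEV;	/* core predates the supported regmap */
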
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 40015609c4b56838e2546e3e8b9cfd3c330089db..c842735a4f45a41d4ce305d236d08a8b2d0ef3b6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2367,6 +2367,7 @@
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3		0xabcd
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI	0xabce
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31	0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA		0xedda
 
 #define PCI_VENDOR_ID_USR		0x16ec
 
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 80f9be858bd08ce63212bca09d56f1ee547ae360..281adbb26e6bdabd0fd7548a1881f9d5565ebeff 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -52,7 +52,6 @@ struct imx_dma_data {
 	int dma_request2; /* secondary DMA request line */
 	enum sdma_peripheral_type peripheral_type;
 	int priority;
-	struct device_node *of_node;
 };
 
 static inline int imx_dma_is_ipu(struct dma_chan *chan)
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h
deleted file mode 100644
index cccc0a665d26ed737a50ac4f93bb8e90dcc797d0..0000000000000000000000000000000000000000
--- a/include/linux/sudmac.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header for the SUDMAC driver
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- */
-#ifndef SUDMAC_H
-#define SUDMAC_H
-
-#include <linux/dmaengine.h>
-#include <linux/shdma-base.h>
-#include <linux/types.h>
-
-/* Used by slave DMA clients to request DMA to/from a specific peripheral */
-struct sudmac_slave {
-	struct shdma_slave	shdma_slave;	/* Set by the platform */
-};
-
-/*
- * Supplied by platforms to specify, how a DMA channel has to be configured for
- * a certain peripheral
- */
-struct sudmac_slave_config {
-	int		slave_id;
-};
-
-struct sudmac_channel {
-	unsigned long	offset;
-	unsigned long	config;
-	unsigned long	wait;		/* The configuable range is 0 to 3 */
-	unsigned long	dint_end_bit;
-};
-
-struct sudmac_pdata {
-	const struct sudmac_slave_config *slave;
-	int slave_num;
-	const struct sudmac_channel *channel;
-	int channel_num;
-};
-
-/* Definitions for the sudmac_channel.config */
-#define SUDMAC_TX_BUFFER_MODE	BIT(0)
-#define SUDMAC_RX_END_MODE	BIT(1)
-
-/* Definitions for the sudmac_channel.dint_end_bit */
-#define SUDMAC_DMA_BIT_CH0	BIT(0)
-#define SUDMAC_DMA_BIT_CH1	BIT(1)
-
-#endif