Diffstat (limited to 'drivers/dma/fsl-edma-v3.c')
-rw-r--r-- drivers/dma/fsl-edma-v3.c | 1374
1 file changed, 1374 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/fsl-edma-v3.c b/drivers/dma/fsl-edma-v3.c
new file mode 100644
index 000000000000..52073d8fb8eb
--- /dev/null
+++ b/drivers/dma/fsl-edma-v3.c
@@ -0,0 +1,1374 @@
+/*
+ * drivers/dma/fsl-edma-v3.c
+ *
+ * Copyright 2017-2018 NXP.
+ *
+ * Driver for the Freescale eDMA engine v3. This driver is based on
+ * fsl-edma3.c but has been changed to match the IP change on i.MX8QM:
+ * every DMA channel is dedicated to a specific hardware request. For
+ * example, channel 14 serves the LPUART1 receive request and channel 13
+ * the transmit request. The eDMA block can be found on i.MX8QM.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CH_CSR 0x00
+#define EDMA_CH_ES 0x04
+#define EDMA_CH_INT 0x08
+#define EDMA_CH_SBR 0x0C
+#define EDMA_CH_PRI 0x10
+#define EDMA_CH_MUX 0x14
+#define EDMA_TCD_SADDR 0x20
+#define EDMA_TCD_SOFF 0x24
+#define EDMA_TCD_ATTR 0x26
+#define EDMA_TCD_NBYTES 0x28
+#define EDMA_TCD_SLAST 0x2C
+#define EDMA_TCD_DADDR 0x30
+#define EDMA_TCD_DOFF 0x34
+#define EDMA_TCD_CITER_ELINK 0x36
+#define EDMA_TCD_CITER 0x36
+#define EDMA_TCD_DLAST_SGA 0x38
+#define EDMA_TCD_CSR 0x3C
+#define EDMA_TCD_BITER_ELINK 0x3E
+#define EDMA_TCD_BITER 0x3E
+
+#define EDMA_CH_SBR_RD BIT(22)
+#define EDMA_CH_SBR_WR BIT(21)
+#define EDMA_CH_CSR_ERQ BIT(0)
+#define EDMA_CH_CSR_EARQ BIT(1)
+#define EDMA_CH_CSR_EEI BIT(2)
+#define EDMA_CH_CSR_DONE BIT(30)
+#define EDMA_CH_CSR_ACTIVE BIT(31)
+
+#define EDMA_TCD_ATTR_DSIZE(x) ((x) & 0x0007)
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
+#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
+#define EDMA_TCD_ATTR_SSIZE_16BYTE (0x0400)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
+#define EDMA_TCD_ATTR_SSIZE_64BYTE (0x0600)
+#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
+#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
+#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
+#define EDMA_TCD_ATTR_DSIZE_16BYTE (0x0004)
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
+#define EDMA_TCD_ATTR_DSIZE_64BYTE (0x0006)
+
+#define EDMA_TCD_SOFF_SOFF(x) (x)
+#define EDMA_TCD_NBYTES_NBYTES(x) (x)
+#define EDMA_TCD_NBYTES_MLOFF(x) ((x) << 10)
+#define EDMA_TCD_NBYTES_DMLOE BIT(30)
+#define EDMA_TCD_NBYTES_SMLOE BIT(31)
+#define EDMA_TCD_SLAST_SLAST(x) (x)
+#define EDMA_TCD_DADDR_DADDR(x) (x)
+#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
+#define EDMA_TCD_DOFF_DOFF(x) (x)
+#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
+#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_EEOP BIT(6)
+#define EDMA_TCD_CSR_ESDA BIT(7)
+
+#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_16_BYTES))
+
+#define ARGS_RX BIT(0)
+#define ARGS_REMOTE BIT(1)
+#define ARGS_DFIFO BIT(2)
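+
+/*
+ * Consumers select a channel with a three-cell specifier decoded in
+ * fsl_edma3_xlate() below: cell 0 is the hardware channel id (or the
+ * request source id on parts with a channel mux), cell 1 the priority,
+ * and cell 2 the ARGS_* flag bits above. A minimal, illustrative
+ * (hypothetical) fragment for the LPUART1 pair mentioned in the header:
+ *
+ * dmas = <&edma0 14 0 1>, <&edma0 13 0 0>;
+ * dma-names = "rx", "tx";
+ */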
+
+/* channel name template defined in dts */
+#define CHAN_PREFIX "edma0-chan"
+#define CHAN_POSFIX "-tx"
+#define CLK_POSFIX "-clk"
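+
+/*
+ * For example, assuming channel 14, the probe code below expects an entry
+ * "edma0-chan14-tx" in interrupt-names and looks up an optional per-channel
+ * clock named "edma0-chan14-clk" built from the same prefix and id.
+ */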
+
+#define EDMA_MINOR_LOOP_TIMEOUT 500 /* us */
+
+struct fsl_edma3_hw_tcd {
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
+ /*
+ * Store the destination address when EDMA_TCD_CSR_ESDA is enabled,
+ * which is used for the 'eeop + cyclic' case.
+ */
+ __le32 sda;
+};
+
+struct fsl_edma3_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma3_hw_tcd *vtcd;
+};
+
+struct fsl_edma3_slave_config {
+ enum dma_transfer_direction dir;
+ enum dma_slave_buswidth addr_width;
+ u32 dev_addr;
+ u32 dev2_addr; /* source addr for dev2dev */
+ u32 burst;
+ u32 attr;
+};
+
+struct fsl_edma3_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ bool idle;
+ struct fsl_edma3_engine *edma3;
+ struct fsl_edma3_desc *edesc;
+ struct fsl_edma3_slave_config fsc;
+ void __iomem *membase;
+ int txirq;
+ int hw_chanid;
+ int priority;
+ int is_rxchan;
+ int is_remote;
+ int is_dfifo;
+ bool is_sw;
+ struct dma_pool *tcd_pool;
+ u32 chn_real_count;
+ char txirq_name[32];
+ struct platform_device *pdev;
+ struct device *dev;
+ struct work_struct issue_worker;
+ u32 srcid;
+ struct clk *clk;
+};
+
+struct fsl_edma3_drvdata {
+ bool has_pd;
+ u32 dmamuxs;
+};
+
+struct fsl_edma3_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma3_chan *echan;
+ bool iscyclic;
+ unsigned int n_tcds;
+ u32 curidx;
+ struct fsl_edma3_sw_tcd tcd[];
+};
+
+struct fsl_edma3_reg_save {
+ u32 csr;
+ u32 sbr;
+};
+
+struct fsl_edma3_engine {
+ struct dma_device dma_dev;
+ unsigned long irqflag;
+ struct mutex fsl_edma3_mutex;
+ u32 n_chans;
+ int errirq;
+ #define MAX_CHAN_NUM 32
+ struct fsl_edma3_reg_save edma_regs[MAX_CHAN_NUM];
+ bool swap; /* remote/local swapped on Audio edma */
+ const struct fsl_edma3_drvdata *drvdata;
+ struct clk *clk_mp;
+ struct fsl_edma3_chan chans[];
+};
+
+static struct fsl_edma3_drvdata fsl_edma_imx8q = {
+ .has_pd = true,
+ .dmamuxs = 0,
+};
+
+static struct fsl_edma3_drvdata fsl_edma_imx8ulp = {
+ .has_pd = false,
+ .dmamuxs = 1,
+};
+
+static struct fsl_edma3_chan *to_fsl_edma3_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma3_chan, vchan.chan);
+}
+
+static struct fsl_edma3_desc *to_fsl_edma3_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma3_desc, vdesc);
+}
+
+static void fsl_edma3_enable_request(struct fsl_edma3_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->membase;
+ u32 val;
+
+ val = readl(addr + EDMA_CH_SBR);
+ /* Remote/local are wrongly swapped on the i.MX8QM Audio eDMA */
+ if (fsl_chan->edma3->swap) {
+ if (!fsl_chan->is_rxchan)
+ val |= EDMA_CH_SBR_RD;
+ else
+ val |= EDMA_CH_SBR_WR;
+ } else {
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_CH_SBR_RD;
+ else
+ val |= EDMA_CH_SBR_WR;
+ }
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_CH_SBR_RD | EDMA_CH_SBR_WR);
+
+ writel(val, addr + EDMA_CH_SBR);
+
+ if (fsl_chan->srcid && !readl(addr + EDMA_CH_MUX))
+ writel(fsl_chan->srcid, addr + EDMA_CH_MUX);
+
+ val = readl(addr + EDMA_CH_CSR);
+
+ val |= EDMA_CH_CSR_ERQ;
+ writel(val, addr + EDMA_CH_CSR);
+}
+
+static void fsl_edma3_disable_request(struct fsl_edma3_chan *fsl_chan)
+{
+ void __iomem *addr = fsl_chan->membase;
+ u32 val = readl(addr + EDMA_CH_CSR);
+
+ if (fsl_chan->srcid)
+ writel(0, addr + EDMA_CH_MUX);
+
+ val &= ~EDMA_CH_CSR_ERQ;
+ writel(val, addr + EDMA_CH_CSR);
+}
+
+static unsigned int fsl_edma3_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ case 16:
+ return EDMA_TCD_ATTR_SSIZE_16BYTE | EDMA_TCD_ATTR_DSIZE_16BYTE;
+ case 32:
+ return EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE;
+ case 64:
+ return EDMA_TCD_ATTR_SSIZE_64BYTE | EDMA_TCD_ATTR_DSIZE_64BYTE;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
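+
+/*
+ * For example, a 4-byte bus width maps to
+ * EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT (0x0202),
+ * i.e. SSIZE = DSIZE = 2, 32-bit reads and writes per engine beat.
+ */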
+
+static void fsl_edma3_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma3_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma3_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+
+static int fsl_edma3_terminate_all(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+ u32 val;
+
+ fsl_edma3_disable_request(fsl_chan);
+
+ /*
+ * Check ACTIVE to make sure the minor loop has really stopped; this
+ * prevents a potential illegal memory write if the channel is not
+ * stopped before its buffer is freed. TX channels carry no such
+ * risk and are skipped.
+ */
+ if (fsl_chan->is_rxchan)
+ readl_poll_timeout_atomic(fsl_chan->membase + EDMA_CH_CSR, val,
+ !(val & EDMA_CH_CSR_ACTIVE), 2,
+ EDMA_MINOR_LOOP_TIMEOUT);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
+ fsl_chan->vchan.cyclic = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_allow(fsl_chan->dev);
+
+ return 0;
+}
+
+static int fsl_edma3_pause(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma3_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int fsl_edma3_resume(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma3_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int fsl_edma3_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+
+ fsl_chan->fsc.dir = cfg->direction;
+ if (cfg->direction == DMA_DEV_TO_MEM) {
+ fsl_chan->fsc.dev_addr = cfg->src_addr;
+ fsl_chan->fsc.addr_width = cfg->src_addr_width;
+ fsl_chan->fsc.burst = cfg->src_maxburst;
+ fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
+ (cfg->src_addr_width);
+ } else if (cfg->direction == DMA_MEM_TO_DEV) {
+ fsl_chan->fsc.dev_addr = cfg->dst_addr;
+ fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+ fsl_chan->fsc.burst = cfg->dst_maxburst;
+ fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
+ (cfg->dst_addr_width);
+ } else if (cfg->direction == DMA_DEV_TO_DEV) {
+ fsl_chan->fsc.dev2_addr = cfg->src_addr;
+ fsl_chan->fsc.dev_addr = cfg->dst_addr;
+ fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+ fsl_chan->fsc.burst = cfg->dst_maxburst;
+ fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
+ (cfg->dst_addr_width);
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static size_t fsl_edma3_desc_residue(struct fsl_edma3_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma3_desc *edesc = fsl_chan->edesc;
+ void __iomem *addr = fsl_chan->membase;
+ enum dma_transfer_direction dir = fsl_chan->fsc.dir;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = readl(addr + EDMA_TCD_SADDR);
+ else
+ cur_addr = readl(addr + EDMA_TCD_DADDR);
+
+ /* skip the tcds before curidx in cyclic case */
+ if (edesc->iscyclic)
+ len -= edesc->curidx * le32_to_cpu(edesc->tcd[0].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[0].vtcd->biter);
+
+ /* figure out the finished and calculate the residue */
+ for (i = edesc->curidx; i < fsl_chan->edesc->n_tcds; i++) {
+ if (dir == DMA_DEV_TO_MEM)
+ cur_addr = le32_to_cpu(edesc->tcd[i].vtcd->sda);
+
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ else
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+
+ len -= size;
+ if (cur_addr > dma_addr && cur_addr <= dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ /*
+ * Mark curidx as the starting point for the next call to
+ * fsl_edma3_desc_residue() (typically invoked from
+ * fsl_edma3_tx_status() once per cyclic interrupt/callback).
+ */
+ if (edesc->iscyclic)
+ edesc->curidx = (i + 1) % edesc->n_tcds;
+ break;
+ }
+ }
+
+ return len;
+}
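+
+/*
+ * Worked example with hypothetical numbers: four TCDs, each with
+ * nbytes = 1024 and biter = 1, give a 4096-byte descriptor. If the
+ * hardware address sits 512 bytes into tcd[2], the loop above stops
+ * there and the residue is 4096 - 1024 - 1024 - 512 = 1536 bytes.
+ */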
+
+static enum dma_status fsl_edma3_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ /* txstate may be NULL; check it before filling in the residue */
+ if (!txstate)
+ return status == DMA_COMPLETE ? status : fsl_chan->status;
+
+ if (status == DMA_COMPLETE) {
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ txstate->residue = fsl_chan->chn_real_count;
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return status;
+ }
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
+ true);
+ else if (fsl_chan->edesc && vdesc)
+ txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
+ false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+
+static void fsl_edma3_set_tcd_regs(struct fsl_edma3_chan *fsl_chan,
+ struct fsl_edma3_hw_tcd *tcd)
+{
+ void __iomem *addr = fsl_chan->membase;
+ /*
+ * TCD parameters are stored in struct fsl_edma3_hw_tcd in little-
+ * endian format. However, the TCD registers must be written in big-
+ * or little-endian order according to the eDMA engine's endian model.
+ */
+ writew(0, addr + EDMA_TCD_CSR);
+ writel(le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR);
+ writel(le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR);
+
+ writew(le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR);
+ writew(le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF);
+
+ writel(le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES);
+ writel(le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST);
+
+ writew(le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER);
+ writew(le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER);
+ writew(le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF);
+
+ writel(le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA);
+
+ /* Must clear the CHa_CSR[DONE] bit before enabling TCDa_CSR[ESG] */
+ writel(readl(addr + EDMA_CH_CSR), addr + EDMA_CH_CSR);
+
+ writew(le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR);
+}
+
+static inline
+void fsl_edma3_fill_tcd(struct fsl_edma3_chan *fsl_chan,
+ struct fsl_edma3_sw_tcd *sw_tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ struct fsl_edma3_hw_tcd *tcd = sw_tcd->vtcd;
+ u32 slast_sda = sw_tcd->ptcd + offsetof(struct fsl_edma3_hw_tcd, sda);
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware SGs require the TCDs to be stored in little-endian
+ * format irrespective of the register endian model, so the values
+ * are stored in memory in little endian and fsl_edma3_set_tcd_regs()
+ * performs the swap when loading the registers.
+ */
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
+
+ if (fsl_chan->is_dfifo) {
+ /* set MLOFF to -8 */
+ nbytes |= EDMA_TCD_NBYTES_MLOFF(-8);
+ /* enable DMLOE/SMLOE */
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ nbytes |= EDMA_TCD_NBYTES_DMLOE;
+ nbytes &= ~EDMA_TCD_NBYTES_SMLOE;
+ } else {
+ nbytes |= EDMA_TCD_NBYTES_SMLOE;
+ nbytes &= ~EDMA_TCD_NBYTES_DMLOE;
+ }
+ }
+
+ tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
+
+ if (fsl_chan->is_rxchan)
+ tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast_sda));
+ else
+ tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
+
+ tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ if (fsl_chan->is_rxchan)
+ csr |= EDMA_TCD_CSR_EEOP | EDMA_TCD_CSR_ESDA;
+
+ if (fsl_chan->is_sw)
+ csr |= EDMA_TCD_CSR_START;
+
+ tcd->csr = cpu_to_le16(csr);
+}
+
+static struct fsl_edma3_desc *fsl_edma3_alloc_desc(struct fsl_edma3_chan
+ *fsl_chan, int sg_len)
+{
+ struct fsl_edma3_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma3_sw_tcd)
+ * sg_len, GFP_ATOMIC);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_ATOMIC, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma3_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ struct fsl_edma3_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ bool major_int = true;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+
+ dma_buf_next = dma_addr;
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+
+ /*
+ * Choose a suitable burst length when period_len is not a multiple
+ * of the burst length, so that the whole transfer length is a
+ * multiple of the minor loop (burst length).
+ */
+ if (period_len % nbytes) {
+ u32 width = fsl_chan->fsc.addr_width;
+
+ for (i = fsl_chan->fsc.burst; i > 1; i--) {
+ if (!(period_len % (i * width))) {
+ nbytes = i * width;
+ break;
+ }
+ }
+ /* if no suitable burst size can be found, fall back to 1 */
+ if (i == 1)
+ nbytes = width;
+ }
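+ /*
+ * E.g. (hypothetical) period_len = 24, addr_width = 4, burst = 8:
+ * the default 32-byte minor loop does not divide 24, so the loop
+ * above settles on i = 6, i.e. nbytes = 24.
+ */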
+
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* physical address of the next TCD, used as the SG link */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ if (fsl_chan->is_dfifo)
+ doff = 4;
+ else
+ doff = 0;
+ } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = dma_buf_next;
+ if (fsl_chan->is_dfifo)
+ soff = 4;
+ else
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->fsc.dev2_addr;
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = 0;
+ doff = 0;
+ major_int = false;
+ }
+
+ fsl_edma3_fill_tcd(fsl_chan, &fsl_desc->tcd[i], src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
+ iter, iter, doff, last_sg, major_int, false, true);
+ dma_buf_next += period_len;
+ }
+
+ fsl_desc->curidx = 0;
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *fsl_edma3_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ struct fsl_edma3_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
+ fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+
+ nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* physical address of the next TCD, used as the SG link */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
+ src_addr = fsl_chan->fsc.dev_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->fsc.dev2_addr;
+ dst_addr = fsl_chan->fsc.dev_addr;
+ soff = 0;
+ doff = 0;
+ }
+
+ /*
+ * Choose a suitable burst length when sg_dma_len is not a
+ * multiple of the burst length, so that the whole transfer
+ * length is a multiple of the minor loop (burst length).
+ */
+ if (sg_dma_len(sg) % nbytes) {
+ u32 width = fsl_chan->fsc.addr_width;
+ int j;
+
+ for (j = fsl_chan->fsc.burst; j > 1; j--) {
+ if (!(sg_dma_len(sg) % (j * width))) {
+ nbytes = j * width;
+ break;
+ }
+ }
+ /* Set the burst size to 1 if there's no suitable one */
+ if (j == 1)
+ nbytes = width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fsl_edma3_fill_tcd(fsl_chan, &fsl_desc->tcd[i],
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff,
+ last_sg, false, false, true);
+ } else {
+ last_sg = 0;
+ fsl_edma3_fill_tcd(fsl_chan, &fsl_desc->tcd[i],
+ src_addr, dst_addr, fsl_chan->fsc.attr,
+ soff, nbytes, 0, iter, iter, doff,
+ last_sg, true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static void fsl_edma3_xfer_desc(struct fsl_edma3_chan *fsl_chan)
+{
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma3_desc(vdesc);
+
+ fsl_edma3_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
+ fsl_edma3_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma3_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ struct fsl_edma3_desc *fsl_desc;
+
+ fsl_desc = fsl_edma3_alloc_desc(fsl_chan, 1);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+
+ fsl_chan->is_sw = true;
+
+ /* Matches copy_align and max_seg_size, so one TCD is enough */
+ fsl_edma3_fill_tcd(fsl_chan, &fsl_desc->tcd[0], dma_src, dma_dst,
+ EDMA_TCD_ATTR_SSIZE_64BYTE | EDMA_TCD_ATTR_DSIZE_64BYTE,
+ 64, len, 0, 1, 1, 64, 0, true, true, false);
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
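+
+/*
+ * A sketch of the resulting memcpy transfer: a single TCD with
+ * citer = biter = 1 and nbytes = len moves the whole buffer in one major
+ * iteration of 64-byte reads and writes, matching the 64-byte copy_align
+ * advertised in probe.
+ */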
+
+static void fsl_edma3_get_realcnt(struct fsl_edma3_chan *fsl_chan)
+{
+ fsl_chan->chn_real_count = fsl_edma3_desc_residue(fsl_chan, NULL, true);
+}
+
+static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma3_chan *fsl_chan = dev_id;
+ unsigned int intr;
+ void __iomem *base_addr;
+
+ spin_lock(&fsl_chan->vchan.lock);
+
+ /* Ignore this interrupt since the channel has been freed at power off */
+ if (!fsl_chan->edesc && !fsl_chan->tcd_pool)
+ goto irq_handled;
+
+ base_addr = fsl_chan->membase;
+
+ intr = readl(base_addr + EDMA_CH_INT);
+ if (!intr)
+ goto irq_handled;
+
+ writel(1, base_addr + EDMA_CH_INT);
+
+ /* Ignore this interrupt since the channel has already been disabled */
+ if (!fsl_chan->edesc)
+ goto irq_handled;
+
+ if (!fsl_chan->edesc->iscyclic) {
+ fsl_edma3_get_realcnt(fsl_chan);
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma3_xfer_desc(fsl_chan);
+irq_handled:
+ spin_unlock(&fsl_chan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static void fsl_edma3_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+
+ schedule_work(&fsl_chan->issue_worker);
+}
+
+static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma3_engine *fsl_edma3 = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+ struct fsl_edma3_chan *fsl_chan;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ mutex_lock(&fsl_edma3->fsl_edma3_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma3->dma_dev.channels,
+ device_node) {
+ if (chan->client_count)
+ continue;
+
+ fsl_chan = to_fsl_edma3_chan(chan);
+ if (fsl_edma3->drvdata->dmamuxs == 0 &&
+ fsl_chan->hw_chanid == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_dfifo = dma_spec->args[2] & ARGS_DFIFO;
+ mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
+ return chan;
+ } else if (fsl_edma3->drvdata->dmamuxs && !fsl_chan->srcid) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->srcid = dma_spec->args[0];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_dfifo = dma_spec->args[2] & ARGS_DFIFO;
+ mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
+ return chan;
+ }
+ }
+ mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
+ return NULL;
+}
+
+static int fsl_edma3_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ struct platform_device *pdev = fsl_chan->pdev;
+ int ret;
+
+ clk_prepare_enable(fsl_chan->clk);
+
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma3_hw_tcd),
+ 32, 0);
+
+ if (fsl_chan->edma3->drvdata->has_pd) {
+ pm_runtime_get_sync(fsl_chan->dev);
+ /* clear any stale pending irq */
+ if (readl(fsl_chan->membase + EDMA_CH_INT))
+ writel(1, fsl_chan->membase + EDMA_CH_INT);
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
+ fsl_edma3_tx_handler, fsl_chan->edma3->irqflag,
+ fsl_chan->txirq_name, fsl_chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register %s IRQ.\n",
+ fsl_chan->txirq_name);
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_put_sync_suspend(fsl_chan->dev);
+
+ return ret;
+ }
+
+ if (fsl_chan->edma3->drvdata->has_pd) {
+ pm_runtime_mark_last_busy(fsl_chan->dev);
+ pm_runtime_put_autosuspend(fsl_chan->dev);
+ }
+
+ return 0;
+}
+
+static void fsl_edma3_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_get_sync(fsl_chan->dev);
+
+ devm_free_irq(&fsl_chan->pdev->dev, fsl_chan->txirq, fsl_chan);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma3_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_chan->tcd_pool = NULL;
+ fsl_chan->srcid = 0;
+ /* Clear interrupt before power off */
+ if (readl(fsl_chan->membase + EDMA_CH_INT))
+ writel(1, fsl_chan->membase + EDMA_CH_INT);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_put_sync_suspend(fsl_chan->dev);
+
+ clk_disable_unprepare(fsl_chan->clk);
+ fsl_chan->is_sw = false;
+}
+
+static void fsl_edma3_synchronize(struct dma_chan *chan)
+{
+ struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
+
+ vchan_synchronize(&fsl_chan->vchan);
+}
+
+static struct device *fsl_edma3_attach_pd(struct device *dev,
+ struct device_node *np, int index)
+{
+ const char *domn = "edma0-chan01";
+ struct device *pd_chan;
+ struct device_link *link;
+ int ret;
+
+ ret = of_property_read_string_index(np, "power-domain-names", index,
+ &domn);
+ if (ret) {
+ dev_err(dev, "parse power-domain-names error.(%d)\n", ret);
+ return NULL;
+ }
+
+ pd_chan = dev_pm_domain_attach_by_name(dev, domn);
+ if (IS_ERR_OR_NULL(pd_chan))
+ return NULL;
+
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (IS_ERR(link)) {
+ dev_err(dev, "Failed to add device_link to %s: %ld\n", domn,
+ PTR_ERR(link));
+ return NULL;
+ }
+
+ return pd_chan;
+}
+
+static void fsl_edma3_issue_work(struct work_struct *work)
+{
+ struct fsl_edma3_chan *fsl_chan = container_of(work,
+ struct fsl_edma3_chan,
+ issue_worker);
+ unsigned long flags;
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_forbid(fsl_chan->dev);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma3_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static const struct of_device_id fsl_edma3_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-edma", .data = &fsl_edma_imx8q},
+ { .compatible = "fsl,imx8qm-adma", .data = &fsl_edma_imx8q},
+ { .compatible = "fsl,imx8ulp-edma", .data = &fsl_edma_imx8ulp},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma3_dt_ids);
+
+static int fsl_edma3_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(fsl_edma3_dt_ids, &pdev->dev);
+ struct fsl_edma3_engine *fsl_edma3;
+ struct fsl_edma3_chan *fsl_chan;
+ struct resource *res_mp;
+ struct resource *res;
+ int len, chans;
+ int ret, i;
+
+ if (!of_id)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "dma-channels", &chans);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ len = sizeof(*fsl_edma3) + sizeof(*fsl_chan) * chans;
+ fsl_edma3 = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_edma3)
+ return -ENOMEM;
+
+ /* Audio eDMA rx/tx channels share one interrupt */
+ if (of_property_read_bool(np, "shared-interrupt"))
+ fsl_edma3->irqflag = IRQF_SHARED;
+
+ fsl_edma3->swap = of_device_is_compatible(np, "fsl,imx8qm-adma");
+ fsl_edma3->n_chans = chans;
+ fsl_edma3->drvdata = (const struct fsl_edma3_drvdata *)of_id->data;
+
+ INIT_LIST_HEAD(&fsl_edma3->dma_dev.channels);
+
+ res_mp = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ fsl_edma3->clk_mp = devm_clk_get_optional(&pdev->dev, "edma-mp-clk");
+ if (IS_ERR(fsl_edma3->clk_mp)) {
+ dev_err(&pdev->dev, "Can't get mp clk.\n");
+ return PTR_ERR(fsl_edma3->clk_mp);
+ }
+ clk_prepare_enable(fsl_edma3->clk_mp);
+
+ for (i = 0; i < fsl_edma3->n_chans; i++) {
+ struct fsl_edma3_chan *fsl_chan = &fsl_edma3->chans[i];
+ const char *txirq_name;
+ char chanid[3], id_len = 0;
+ char clk_name[18];
+ char *p = chanid;
+ unsigned long val;
+
+ fsl_chan->edma3 = fsl_edma3;
+ fsl_chan->pdev = pdev;
+ fsl_chan->idle = true;
+ fsl_chan->srcid = 0;
+ /* Get per channel membase */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
+ fsl_chan->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_chan->membase))
+ return PTR_ERR(fsl_chan->membase);
+
+ /* Get the hardware channel id from the channel membase:
+ * channel0 at 0x10000, channel1 at 0x20000, ... up to 32 channels.
+ * E.g. an offset of 0x10000 from res_mp gives 1, hence hw_chanid 0
+ * after the decrement below. Note: the first resource (res_mp) is
+ * skipped since it is not a channel.
+ */
+ fsl_chan->hw_chanid = ((res->start - res_mp->start) >> 16) & 0x3f;
+ fsl_chan->hw_chanid--;
+
+ ret = of_property_read_string_index(np, "interrupt-names", i,
+ &txirq_name);
+ if (ret) {
+ dev_err(&pdev->dev, "read interrupt-names fail.\n");
+ return ret;
+ }
+ /* Get the channel id length from dts, one or two digits */
+ id_len = strlen(txirq_name) - strlen(CHAN_PREFIX) -
+ strlen(CHAN_POSFIX);
+ if (id_len > 2) {
+ dev_err(&pdev->dev, "%s is edmaX-chanX-tx in dts?\n",
+ res->name);
+ return -EINVAL;
+ }
+ /* Grab channel id from txirq_name */
+ strncpy(p, txirq_name + strlen(CHAN_PREFIX), id_len);
+ *(p + id_len) = '\0';
+
+ /* check that the channel id matches hw_chanid */
+ ret = kstrtoul(chanid, 0, &val);
+ if (ret || val != fsl_chan->hw_chanid) {
+ dev_err(&pdev->dev, "%s,wrong id?\n", txirq_name);
+ return -EINVAL;
+ }
+
+ /* request channel irq */
+ fsl_chan->txirq = platform_get_irq_byname(pdev, txirq_name);
+ if (fsl_chan->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get %s irq.\n", txirq_name);
+ return fsl_chan->txirq;
+ }
+
+ memcpy(fsl_chan->txirq_name, txirq_name, strlen(txirq_name));
+
+ strncpy(clk_name, txirq_name, strlen(CHAN_PREFIX) + id_len);
+ strcpy(clk_name + strlen(CHAN_PREFIX) + id_len, CLK_POSFIX);
+ fsl_chan->clk = devm_clk_get_optional(&pdev->dev,
+ (const char *)clk_name);
+ if (IS_ERR(fsl_chan->clk))
+ return PTR_ERR(fsl_chan->clk);
+
+ fsl_chan->vchan.desc_free = fsl_edma3_free_desc;
+ vchan_init(&fsl_chan->vchan, &fsl_edma3->dma_dev);
+
+ INIT_WORK(&fsl_chan->issue_worker,
+ fsl_edma3_issue_work);
+ }
+
+ mutex_init(&fsl_edma3->fsl_edma3_mutex);
+
+ dma_cap_set(DMA_PRIVATE, fsl_edma3->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, fsl_edma3->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, fsl_edma3->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, fsl_edma3->dma_dev.cap_mask);
+
+ fsl_edma3->dma_dev.dev = &pdev->dev;
+ fsl_edma3->dma_dev.device_alloc_chan_resources
+ = fsl_edma3_alloc_chan_resources;
+ fsl_edma3->dma_dev.device_free_chan_resources
+ = fsl_edma3_free_chan_resources;
+ fsl_edma3->dma_dev.device_tx_status = fsl_edma3_tx_status;
+ fsl_edma3->dma_dev.device_prep_slave_sg = fsl_edma3_prep_slave_sg;
+ fsl_edma3->dma_dev.device_prep_dma_memcpy = fsl_edma3_prep_memcpy;
+ fsl_edma3->dma_dev.device_prep_dma_cyclic = fsl_edma3_prep_dma_cyclic;
+ fsl_edma3->dma_dev.device_config = fsl_edma3_slave_config;
+ fsl_edma3->dma_dev.device_pause = fsl_edma3_pause;
+ fsl_edma3->dma_dev.device_resume = fsl_edma3_resume;
+ fsl_edma3->dma_dev.device_terminate_all = fsl_edma3_terminate_all;
+ fsl_edma3->dma_dev.device_issue_pending = fsl_edma3_issue_pending;
+ fsl_edma3->dma_dev.device_synchronize = fsl_edma3_synchronize;
+
+ fsl_edma3->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ fsl_edma3->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+ fsl_edma3->dma_dev.directions = BIT(DMA_DEV_TO_MEM) |
+ BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_DEV_TO_DEV);
+
+ fsl_edma3->dma_dev.copy_align = DMAENGINE_ALIGN_64_BYTES;
+ /* For the worst case of nbytes = 1, the 15-bit CITER bounds max_seg_size */
+ dma_set_max_seg_size(fsl_edma3->dma_dev.dev, 0x7fff);
+
+ platform_set_drvdata(pdev, fsl_edma3);
+
+ ret = dma_async_device_register(&fsl_edma3->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+ return ret;
+ }
+ /* Attach a power domain from dts to each dma channel device */
+ if (fsl_edma3->drvdata->has_pd) {
+ for (i = 0; i < fsl_edma3->n_chans; i++) {
+ struct fsl_edma3_chan *fsl_chan = &fsl_edma3->chans[i];
+ struct device *dev;
+
+ dev = fsl_edma3_attach_pd(&pdev->dev, np, i);
+ if (!dev) {
+ dev_err(&pdev->dev, "edma channel attach failed.\n");
+ return -EINVAL;
+ }
+
+ fsl_chan->dev = dev;
+ /* clear any stale pending irq */
+ writel(1, fsl_chan->membase + EDMA_CH_INT);
+
+ pm_runtime_use_autosuspend(fsl_chan->dev);
+ pm_runtime_set_autosuspend_delay(fsl_chan->dev, 200);
+ pm_runtime_set_active(fsl_chan->dev);
+ pm_runtime_put_sync_suspend(fsl_chan->dev);
+ }
+ }
+
+ ret = of_dma_controller_register(np, fsl_edma3_xlate, fsl_edma3);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+ dma_async_device_unregister(&fsl_edma3->dma_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_edma3_remove(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_edma3_engine *fsl_edma3 = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_edma3->dma_dev);
+
+ clk_disable_unprepare(fsl_edma3->clk_mp);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsl_edma3_suspend_late(struct device *dev)
+{
+ struct fsl_edma3_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma3_chan *fsl_chan;
+ unsigned long flags;
+ void __iomem *addr;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ addr = fsl_chan->membase;
+
+ if ((fsl_chan->edma3->drvdata->has_pd &&
+ pm_runtime_status_suspended(fsl_chan->dev)) ||
+ (!fsl_chan->edma3->drvdata->has_pd && !fsl_chan->srcid))
+ continue;
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_get_sync(fsl_chan->dev);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma->edma_regs[i].csr = readl(addr + EDMA_CH_CSR);
+ fsl_edma->edma_regs[i].sbr = readl(addr + EDMA_CH_SBR);
+ /* Make sure the channel is idle, otherwise force-disable it. */
+ if (unlikely(!fsl_chan->idle)) {
+ dev_warn(dev, "WARN: There is a non-idle channel.");
+ fsl_edma3_disable_request(fsl_chan);
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ if (fsl_chan->edma3->drvdata->has_pd)
+ pm_runtime_put_sync_suspend(fsl_chan->dev);
+ }
+
+ return 0;
+}
+
+static int fsl_edma3_resume_early(struct device *dev)
+{
+ struct fsl_edma3_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma3_chan *fsl_chan;
+ void __iomem *addr;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ addr = fsl_chan->membase;
+
+ if ((fsl_chan->edma3->drvdata->has_pd &&
+ pm_runtime_status_suspended(fsl_chan->dev)) ||
+ (!fsl_chan->edma3->drvdata->has_pd && !fsl_chan->srcid))
+ continue;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ writel(fsl_edma->edma_regs[i].csr, addr + EDMA_CH_CSR);
+ writel(fsl_edma->edma_regs[i].sbr, addr + EDMA_CH_SBR);
+ /* restore the tcd if this channel was not terminated before suspend */
+ if (fsl_chan->edesc)
+ fsl_edma3_set_tcd_regs(fsl_chan,
+ fsl_chan->edesc->tcd[0].vtcd);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_edma3_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(fsl_edma3_suspend_late,
+ fsl_edma3_resume_early)
+};
+
+static struct platform_driver fsl_edma3_driver = {
+ .driver = {
+ .name = "fsl-edma-v3",
+ .of_match_table = fsl_edma3_dt_ids,
+ .pm = &fsl_edma3_pm_ops,
+ },
+ .probe = fsl_edma3_probe,
+ .remove = fsl_edma3_remove,
+};
+
+static int __init fsl_edma3_init(void)
+{
+ return platform_driver_register(&fsl_edma3_driver);
+}
+fs_initcall(fsl_edma3_init);
+
+static void __exit fsl_edma3_exit(void)
+{
+ platform_driver_unregister(&fsl_edma3_driver);
+}
+module_exit(fsl_edma3_exit);
+
+MODULE_ALIAS("platform:fsl-edma3");
+MODULE_DESCRIPTION("Freescale eDMA-V3 engine driver");
+MODULE_LICENSE("GPL v2");