Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmaengine.c |  2
-rw-r--r--  drivers/dma/fsldma.c    | 88
-rw-r--r--  drivers/dma/fsldma.h    | 48
-rw-r--r--  drivers/dma/ioat_dca.c  |  4
-rw-r--r--  drivers/dma/iop-adma.c  | 32
5 files changed, 121 insertions, 53 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 29965231b912..8db0e7f9d3f4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -357,7 +357,7 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_zero_sum);
BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
!device->device_prep_dma_memset);
- BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+ BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
BUG_ON(!device->device_alloc_chan_resources);
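The fix above pairs the DMA_INTERRUPT capability bit with its matching prep hook (the old check tested DMA_ZERO_SUM by mistake). A minimal sketch, not part of the patch, of what a driver must now provide when it advertises DMA_INTERRUPT; my_prep_interrupt is a hypothetical driver callback:

	/* A device setting DMA_INTERRUPT in its capability mask must also
	 * install device_prep_dma_interrupt, or the corrected BUG_ON in
	 * dma_async_device_register() fires at registration time. */
	dma_cap_set(DMA_INTERRUPT, device->cap_mask);
	device->device_prep_dma_interrupt = my_prep_interrupt;
	err = dma_async_device_register(device);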
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cc9a68158d99..72692309398a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -57,12 +57,12 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
}
-static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}
-static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
+
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
u32 sr = get_sr(fsl_chan);
@@ -406,6 +411,35 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
dma_pool_destroy(fsl_chan->desc_pool);
}
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_interrupt(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan;
+ struct fsl_desc_sw *new;
+
+ if (!chan)
+ return NULL;
+
+ fsl_chan = to_fsl_chan(chan);
+
+ new = fsl_dma_alloc_descriptor(fsl_chan);
+ if (!new) {
+ dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+ return NULL;
+ }
+
+ new->async_tx.cookie = -EBUSY;
+ new->async_tx.ack = 0;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &new->async_tx.tx_list);
+
+ /* Set End-of-link to the last link descriptor of the new list */
+ set_ld_eol(fsl_chan, new);
+
+ return &new->async_tx;
+}
+
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
size_t len, unsigned long flags)
@@ -436,7 +470,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif
- copy = min(len, FSL_DMA_BCR_MAX_CNT);
+ copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
set_desc_cnt(fsl_chan, &new->hw, copy);
set_desc_src(fsl_chan, &new->hw, dma_src);
@@ -513,7 +547,6 @@ static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- fsl_dma_update_completed_cookie(fsl_chan);
dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
fsl_chan->completed_cookie);
list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
@@ -581,8 +614,8 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
if (ld_node != &fsl_chan->ld_queue) {
/* Get the ld start address from ld_queue */
next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
- (u64)next_dest_addr);
+ dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
+ (void *)next_dest_addr);
set_cdar(fsl_chan, next_dest_addr);
dma_start(fsl_chan);
} else {
@@ -662,7 +695,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- dma_addr_t stat;
+ u32 stat;
stat = get_sr(fsl_chan);
dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
@@ -676,15 +709,32 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
if (stat & FSL_DMA_SR_TE)
dev_err(fsl_chan->dev, "Transfer Error!\n");
+ /* Programming Error
+ * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+ * trigger a PE interrupt.
+ */
+ if (stat & FSL_DMA_SR_PE) {
+ dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+ if (get_bcr(fsl_chan) == 0) {
+ /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
+ * Now, update the completed cookie, and continue the
+ * next uncompleted transfer.
+ */
+ fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_chan_xfer_ld_queue(fsl_chan);
+ }
+ stat &= ~FSL_DMA_SR_PE;
+ }
+
/* If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
- "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
- (u64)get_ndar(fsl_chan));
+ dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
+ (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
stat &= ~FSL_DMA_SR_EOSI;
+ fsl_dma_update_completed_cookie(fsl_chan);
}
/* If the current transfer is the end-of-transfer,
@@ -726,12 +776,15 @@ static void dma_do_tasklet(unsigned long data)
fsl_chan_ld_cleanup(fsl_chan);
}
+#ifdef FSL_DMA_CALLBACKTEST
static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
{
if (fsl_chan)
dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
}
+#endif
+#ifdef CONFIG_FSL_DMA_SELFTEST
static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
struct dma_chan *chan;
@@ -813,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
async_tx_ack(tx3);
+ /* Interrupt tx test */
+ tx1 = fsl_dma_prep_interrupt(chan);
+ async_tx_ack(tx1);
+ cookie = fsl_dma_tx_submit(tx1);
+
/* Test exchanging the submit order of the prepared txs */
cookie = fsl_dma_tx_submit(tx3);
cookie = fsl_dma_tx_submit(tx2);
@@ -837,9 +895,9 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
if (err) {
for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
i++);
- dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
+ dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
"error! src 0x%x, dest 0x%x\n",
- i, test_size, *(src + i), *(dest + i));
+ i, (long)test_size, *(src + i), *(dest + i));
}
free_resources:
@@ -848,6 +906,7 @@ out:
kfree(src);
return err;
}
+#endif
static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
const struct of_device_id *match)
@@ -1008,8 +1067,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
}
dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at 0x%08x...\n",
- match->compatible, fdev->reg.start);
+ "controller at %p...\n",
+ match->compatible, (void *)fdev->reg.start);
fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- fdev->reg.start + 1);
@@ -1017,6 +1076,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+ fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_is_tx_complete = fsl_dma_is_complete;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
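With device_prep_dma_interrupt wired up, a client can queue a pure-interrupt descriptor through the generic dmaengine API. A hedged sketch against the API of this era; my_done is a hypothetical completion hook:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_interrupt(chan);
	if (tx) {
		tx->callback = my_done;		/* run on completion */
		tx->callback_param = NULL;
		cookie = tx->tx_submit(tx);	/* queue the NULL transfer */
		chan->device->device_issue_pending(chan);
	}

On this controller the NULL transfer completes with BCR == 0 and raises a programming-error interrupt, which the new FSL_DMA_SR_PE handling above converts into an ordinary completion.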
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba78c42121ba..6faf07ba0d0e 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -40,6 +40,7 @@
#define FSL_DMA_MR_EOTIE 0x00000080
#define FSL_DMA_SR_CH 0x00000020
+#define FSL_DMA_SR_PE 0x00000010
#define FSL_DMA_SR_CB 0x00000004
#define FSL_DMA_SR_TE 0x00000080
#define FSL_DMA_SR_EOSI 0x00000002
@@ -74,12 +75,15 @@
#define FSL_DMA_DGSR_EOSI 0x02
#define FSL_DMA_DGSR_EOLSI 0x01
+typedef u64 __bitwise v64;
+typedef u32 __bitwise v32;
+
struct fsl_dma_ld_hw {
- u64 __bitwise src_addr;
- u64 __bitwise dst_addr;
- u64 __bitwise next_ln_addr;
- u32 __bitwise count;
- u32 __bitwise reserve;
+ v64 src_addr;
+ v64 dst_addr;
+ v64 next_ln_addr;
+ v32 count;
+ v32 reserve;
} __attribute__((aligned(32)));
struct fsl_desc_sw {
@@ -91,13 +95,13 @@ struct fsl_desc_sw {
} __attribute__((aligned(32)));
struct fsl_dma_chan_regs {
- u32 __bitwise mr; /* 0x00 - Mode Register */
- u32 __bitwise sr; /* 0x04 - Status Register */
- u64 __bitwise cdar; /* 0x08 - Current descriptor address register */
- u64 __bitwise sar; /* 0x10 - Source Address Register */
- u64 __bitwise dar; /* 0x18 - Destination Address Register */
- u32 __bitwise bcr; /* 0x20 - Byte Count Register */
- u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */
+ u32 mr; /* 0x00 - Mode Register */
+ u32 sr; /* 0x04 - Status Register */
+ u64 cdar; /* 0x08 - Current descriptor address register */
+ u64 sar; /* 0x10 - Source Address Register */
+ u64 dar; /* 0x18 - Destination Address Register */
+ u32 bcr; /* 0x20 - Byte Count Register */
+ u64 ndar; /* 0x24 - Next Descriptor Address Register */
};
struct fsl_dma_chan;
@@ -150,25 +154,27 @@ struct fsl_dma_chan {
#ifndef __powerpc64__
static u64 in_be64(const u64 __iomem *addr)
{
- return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
+ return ((u64)in_be32((u32 __iomem *)addr) << 32) |
+ (in_be32((u32 __iomem *)addr + 1));
}
static void out_be64(u64 __iomem *addr, u64 val)
{
- out_be32((u32 *)addr, val >> 32);
- out_be32((u32 *)addr + 1, (u32)val);
+ out_be32((u32 __iomem *)addr, val >> 32);
+ out_be32((u32 __iomem *)addr + 1, (u32)val);
}
/* There are no asm instructions for 64-bit reverse loads and stores */
static u64 in_le64(const u64 __iomem *addr)
{
- return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
+ return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
+ (in_le32((u32 __iomem *)addr));
}
static void out_le64(u64 __iomem *addr, u64 val)
{
- out_le32((u32 *)addr + 1, val >> 32);
- out_le32((u32 *)addr, (u32)val);
+ out_le32((u32 __iomem *)addr + 1, val >> 32);
+ out_le32((u32 __iomem *)addr, (u32)val);
}
#endif
@@ -181,9 +187,11 @@ static void out_le64(u64 __iomem *addr, u64 val)
#define DMA_TO_CPU(fsl_chan, d, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- be##width##_to_cpu(d) : le##width##_to_cpu(d))
+ be##width##_to_cpu((__force __be##width)(v##width)d) : \
+ le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- cpu_to_be##width(c) : cpu_to_le##width(c))
+ (__force v##width)cpu_to_be##width(c) : \
+ (__force v##width)cpu_to_le##width(c))
#endif /* __DMA_FSLDMA_H */
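The v32/v64 __bitwise typedefs let sparse flag raw cpu-endian stores into the hardware link descriptor. A sketch of how a descriptor field write goes through the macro, mirroring the driver's set_desc_cnt helper (body assumed from the macro's use):

	static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				 struct fsl_dma_ld_hw *hw, u32 count)
	{
		/* CPU_TO_DMA yields a __bitwise v32, so this assignment
		 * type-checks under sparse; a plain cpu-endian store into
		 * hw->count would now draw a warning. */
		hw->count = CPU_TO_DMA(fsl_chan, count, 32);
	}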
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 0fa8a98051a8..9e922760b7ff 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -98,7 +98,7 @@ struct ioat_dca_slot {
struct ioat_dca_priv {
void __iomem *iobase;
- void *dca_base;
+ void __iomem *dca_base;
int max_requesters;
int requester_count;
u8 tag_map[IOAT_TAG_MAP_LEN];
@@ -338,7 +338,7 @@ static struct dca_ops ioat2_dca_ops = {
.get_tag = ioat2_dca_get_tag,
};
-static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
int slots = 0;
u32 req;
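Annotating dca_base as __iomem lets sparse verify that every access goes through the MMIO accessors rather than a plain pointer dereference. A minimal sketch; the 0x04 offset is hypothetical, not a real register in this driver:

	/* With the __iomem annotation, readl()/writel() are accepted on
	 * dca_base and direct dereferences are rejected by sparse. */
	u32 csr = readl(ioatdca->dca_base + 0x04);
	writel(csr | 1, ioatdca->dca_base + 0x04);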
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 3986d54492bd..f82b0906d466 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -140,7 +140,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
int busy = iop_chan_is_busy(iop_chan);
int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
/* free completed slots from the chain starting with
* the oldest descriptor
*/
@@ -438,7 +438,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
spin_unlock_bh(&iop_chan->lock);
dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
- __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+ __func__, sw_desc->async_tx.cookie, sw_desc->idx);
return cookie;
}
@@ -520,7 +520,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
struct iop_adma_desc_slot *sw_desc, *grp_start;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
@@ -548,7 +548,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
- __FUNCTION__, len);
+ __func__, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
@@ -580,7 +580,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
- __FUNCTION__, len);
+ __func__, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
@@ -614,7 +614,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
- __FUNCTION__, src_cnt, len, flags);
+ __func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
@@ -648,7 +648,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
return NULL;
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
- __FUNCTION__, src_cnt, len);
+ __func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
@@ -659,7 +659,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
iop_desc_set_zero_sum_byte_count(grp_start, len);
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
- __FUNCTION__, grp_start->xor_check_result);
+ __func__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
while (src_cnt--)
@@ -700,7 +700,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
iop_chan->last_used = NULL;
dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
- __FUNCTION__, iop_chan->slots_allocated);
+ __func__, iop_chan->slots_allocated);
spin_unlock_bh(&iop_chan->lock);
/* one is ok since we left it on there on purpose */
@@ -753,7 +753,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
struct iop_adma_chan *chan = data;
- dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(chan->device->common.dev, "%s\n", __func__);
tasklet_schedule(&chan->irq_tasklet);
@@ -766,7 +766,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
struct iop_adma_chan *chan = data;
- dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(chan->device->common.dev, "%s\n", __func__);
tasklet_schedule(&chan->irq_tasklet);
@@ -823,7 +823,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
int err = 0;
struct iop_adma_chan *iop_chan;
- dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(device->common.dev, "%s\n", __func__);
src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
if (!src)
@@ -906,7 +906,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
int err = 0;
struct iop_adma_chan *iop_chan;
- dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(device->common.dev, "%s\n", __func__);
for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1159,7 +1159,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
}
dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
- __FUNCTION__, adev->dma_desc_pool_virt,
+ __func__, adev->dma_desc_pool_virt,
(void *) adev->dma_desc_pool);
adev->id = plat_data->hw_id;
@@ -1289,7 +1289,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
dma_cookie_t cookie;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
@@ -1346,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
dma_cookie_t cookie;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
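The iop-adma conversions above are drop-in renames: __FUNCTION__ is a GNU extension, while __func__ is the C99 predefined identifier with the same expansion. A one-line illustration:

	/* __func__ expands to the enclosing function's name, here "my_fn";
	 * __FUNCTION__ behaves the same but only as a gcc extension. */
	void my_fn(struct device *dev) { dev_dbg(dev, "%s\n", __func__); }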