 Documentation/feature-removal-schedule.txt |  11
 arch/arm/configs/omap2plus_defconfig       |   2
 arch/arm/mach-omap1/board-h2-mmc.c         |   1
 arch/arm/mach-omap1/board-h3-mmc.c         |   1
 arch/arm/mach-omap1/board-nokia770.c       |   1
 arch/arm/mach-omap2/board-n8x0.c           |   1
 arch/arm/mach-omap2/hsmmc.c                |   1
 arch/arm/plat-omap/include/plat/mmc.h      |   2
 drivers/dma/Kconfig                        |   6
 drivers/dma/Makefile                       |   1
 drivers/dma/omap-dma.c                     | 669
 drivers/dma/sa11x0-dma.c                   | 153
 drivers/mmc/host/omap.c                    | 368
 drivers/mmc/host/omap_hsmmc.c              | 204
 drivers/mtd/nand/omap2.c                   | 106
 drivers/spi/spi-omap2-mcspi.c              | 229
 include/linux/omap-dma.h                   |  22
 17 files changed, 1280 insertions(+), 498 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 56000b33340b..1f7ba3537a85 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -612,3 +612,14 @@ When: June 2013
Why: Unsupported/unmaintained/unused since 2.6
----------------------------
+
+What: OMAP private DMA implementation
+When: 2013
+Why: We have a DMA engine implementation; all users should be updated
+ to use this rather than persisting with the old APIs. The old APIs
+ block merging the old DMA engine implementation into the DMA
+ engine driver.
+Who: Russell King <linux@arm.linux.org.uk>,
+ Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
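
(Editorial sketch of the migration this entry asks for: the driver conversions later in this series replace omap_request_dma() with a dmaengine channel request that goes through omap_dma_filter_fn(). The function name and request-line argument below are placeholders, not taken from any particular driver.)

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/* Ask the dmaengine core for a slave channel bound to one OMAP DMA
 * request line, instead of calling omap_request_dma() directly. */
static struct dma_chan *example_request_channel(unsigned int req_line)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* omap_dma_filter_fn() accepts the channel whose DMA request
	 * signal matches *param. */
	return dma_request_channel(mask, omap_dma_filter_fn, &req_line);
}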
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9854ff4279e0..3b391b23bdda 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -194,6 +194,8 @@ CONFIG_MMC_OMAP_HS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index da0e37d40823..e1362ce48497 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.init = mmc_late_init,
.cleanup = mmc_cleanup,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index f8242aa9b763..c74daace8cd6 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
*/
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 7212ae97f44a..c54b45f32638 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.max_freq = 12000000,
.slots[0] = {
.set_power = nokia770_mmc_set_power,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 2c5d0ed75285..677357ff61ac 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.cleanup = n8x0_mmc_cleanup,
.shutdown = n8x0_mmc_shutdown,
.max_freq = 24000000,
- .dma_mask = 0xffffffff,
.slots[0] = {
.wires = 4,
.set_power = n8x0_mmc_set_power,
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be697d4e0843..a9675d8d1822 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
mmc->slots[0].caps = c->caps;
mmc->slots[0].pm_caps = c->pm_caps;
mmc->slots[0].internal_clock = !c->ext_clock;
- mmc->dma_mask = 0xffffffff;
mmc->max_freq = c->max_freq;
if (cpu_is_omap44xx())
mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 5493bd95da5e..eb3e4d555343 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
- u64 dma_mask;
-
/* Integrating attributes from the omap_hwmod layer */
u8 controller_flags;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index be0dc3b7c409..6f93365d9d04 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -262,6 +262,12 @@ config DMA_SA11X0
SA-1110 SoCs. This DMA engine can only be used with on-chip
devices.
+config DMA_OMAP
+ tristate "OMAP DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index fc05f7ddac7e..ddc291a2116a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 000000000000..ae0561826137
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,669 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include <plat/dma.h>
+
+struct omap_dmadev {
+ struct dma_device ddev;
+ spinlock_t lock;
+ struct tasklet_struct task;
+ struct list_head pending;
+};
+
+struct omap_chan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+
+ struct dma_slave_config cfg;
+ unsigned dma_sig;
+ bool cyclic;
+
+ int dma_ch;
+ struct omap_desc *desc;
+ unsigned sgidx;
+};
+
+struct omap_sg {
+ dma_addr_t addr;
+ uint32_t en; /* number of elements (24-bit) */
+ uint32_t fn; /* number of frames (16-bit) */
+};
+
+struct omap_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+
+ int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
+ uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
+ uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
+ uint8_t periph_port; /* Peripheral port */
+
+ unsigned sglen;
+ struct omap_sg sg[0];
+};
+
+static const unsigned es_bytes[] = {
+ [OMAP_DMA_DATA_TYPE_S8] = 1,
+ [OMAP_DMA_DATA_TYPE_S16] = 2,
+ [OMAP_DMA_DATA_TYPE_S32] = 4,
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct omap_desc, vd));
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+ unsigned idx)
+{
+ struct omap_sg *sg = d->sg + idx;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+ else
+ omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+
+ omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
+ d->sync_mode, c->dma_sig, d->sync_type);
+
+ omap_start_dma(c->dma_ch);
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+ struct omap_desc *d;
+
+ if (!vd) {
+ c->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ c->desc = d = to_omap_dma_desc(&vd->tx);
+ c->sgidx = 0;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_src_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+ else
+ omap_set_dma_dest_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+
+ omap_dma_start_sg(c, d, 0);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+ struct omap_chan *c = data;
+ struct omap_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ d = c->desc;
+ if (d) {
+ if (!c->cyclic) {
+ if (++c->sgidx < d->sglen) {
+ omap_dma_start_sg(c, d, c->sgidx);
+ } else {
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ } else {
+ vchan_cyclic_callback(&d->vd);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+/*
+ * This callback schedules all pending channels. We could be more
+ * clever here by postponing allocation of the real DMA channels to
+ * this point, and freeing them when our virtual channel becomes idle.
+ *
+ * We would then need to deal with 'all channels in-use'
+ */
+static void omap_dma_sched(unsigned long data)
+{
+ struct omap_dmadev *d = (struct omap_dmadev *)data;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&d->lock);
+ list_splice_tail_init(&d->pending, &head);
+ spin_unlock_irq(&d->lock);
+
+ while (!list_empty(&head)) {
+ struct omap_chan *c = list_first_entry(&head,
+ struct omap_chan, node);
+
+ spin_lock_irq(&c->vc.lock);
+ list_del_init(&c->node);
+ omap_dma_start_desc(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+
+ return omap_request_dma(c->dma_sig, "DMA engine",
+ omap_dma_callback, c, &c->dma_ch);
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_free_chan_resources(&c->vc);
+ omap_free_dma(c->dma_ch);
+
+ dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+}
+
+static size_t omap_dma_sg_size(struct omap_sg *sg)
+{
+ return sg->en * sg->fn;
+}
+
+static size_t omap_dma_desc_size(struct omap_desc *d)
+{
+ unsigned i;
+ size_t size;
+
+ for (size = i = 0; i < d->sglen; i++)
+ size += omap_dma_sg_size(&d->sg[i]);
+
+ return size * es_bytes[d->es];
+}
+
+static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
+{
+ unsigned i;
+ size_t size, es_size = es_bytes[d->es];
+
+ for (size = i = 0; i < d->sglen; i++) {
+ size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
+
+ if (size)
+ size += this_size;
+ else if (addr >= d->sg[i].addr &&
+ addr < d->sg[i].addr + this_size)
+ size += d->sg[i].addr + this_size - addr;
+ }
+ return size;
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
+ } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+ struct omap_desc *d = c->desc;
+ dma_addr_t pos;
+
+ if (d->dir == DMA_MEM_TO_DEV)
+ pos = omap_get_dma_src_pos(c->dma_ch);
+ else if (d->dir == DMA_DEV_TO_MEM)
+ pos = omap_get_dma_dst_pos(c->dma_ch);
+ else
+ pos = 0;
+
+ txstate->residue = omap_dma_desc_size_pos(d, pos);
+ } else {
+ txstate->residue = 0;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return ret;
+}
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned i, j = 0, es, en, frame_bytes, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_FRAME;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_TIPB;
+
+ /*
+ * Build our scatterlist entries: each contains the address,
+ * the number of elements (EN) in each frame, and the number of
+ * frames (FN). Number of bytes for this entry = ES * EN * FN.
+ *
+ * Burst size translates to number of elements with frame sync.
+ * Note: DMA engine defines burst to be the number of dev-width
+ * transfers.
+ */
+ en = burst;
+ frame_bytes = es_bytes[es] * en;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->sg[j].addr = sg_dma_address(sgent);
+ d->sg[j].en = en;
+ d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
+ j++;
+ }
+
+ d->sglen = j;
+
+ return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned es, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->fi = burst;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_PACKET;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_MPUI;
+ d->sg[0].addr = buf_addr;
+ d->sg[0].en = period_len / es_bytes[es];
+ d->sg[0].fn = buf_len / period_len;
+ d->sglen = 1;
+
+ if (!c->cyclic) {
+ c->cyclic = true;
+ omap_dma_link_lch(c->dma_ch, c->dma_ch);
+ omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+ omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+ }
+
+ if (!cpu_class_is_omap1()) {
+ omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+}
+
+static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+{
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+ return 0;
+}
+
+static int omap_dma_terminate_all(struct omap_chan *c)
+{
+ struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+ * after omap_stop_dma() returns (even if it does, it will see
+ * c->desc is NULL and exit.)
+ */
+ if (c->desc) {
+ c->desc = NULL;
+ omap_stop_dma(c->dma_ch);
+ }
+
+ if (c->cyclic) {
+ c->cyclic = false;
+ omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int omap_dma_pause(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_resume(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = omap_dma_terminate_all(c);
+ break;
+
+ case DMA_PAUSE:
+ ret = omap_dma_pause(c);
+ break;
+
+ case DMA_RESUME:
+ ret = omap_dma_resume(c);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
+{
+ struct omap_chan *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->dma_sig = dma_sig;
+ c->vc.desc_free = omap_dma_desc_free;
+ vchan_init(&c->vc, &od->ddev);
+ INIT_LIST_HEAD(&c->node);
+
+ od->ddev.chancnt++;
+
+ return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+ tasklet_kill(&od->task);
+ while (!list_empty(&od->ddev.channels)) {
+ struct omap_chan *c = list_first_entry(&od->ddev.channels,
+ struct omap_chan, vc.chan.device_node);
+
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ kfree(c);
+ }
+ kfree(od);
+}
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+ struct omap_dmadev *od;
+ int rc, i;
+
+ od = kzalloc(sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+ od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+ od->ddev.device_tx_status = omap_dma_tx_status;
+ od->ddev.device_issue_pending = omap_dma_issue_pending;
+ od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+ od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
+ od->ddev.device_control = omap_dma_control;
+ od->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&od->ddev.channels);
+ INIT_LIST_HEAD(&od->pending);
+ spin_lock_init(&od->lock);
+
+ tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
+
+ for (i = 0; i < 127; i++) {
+ rc = omap_dma_chan_init(od, i);
+ if (rc) {
+ omap_dma_free(od);
+ return rc;
+ }
+ }
+
+ rc = dma_async_device_register(&od->ddev);
+ if (rc) {
+ pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+ rc);
+ omap_dma_free(od);
+ } else {
+ platform_set_drvdata(pdev, od);
+ }
+
+ dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+
+ return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+ struct omap_dmadev *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->ddev);
+ omap_dma_free(od);
+
+ return 0;
+}
+
+static struct platform_driver omap_dma_driver = {
+ .probe = omap_dma_probe,
+ .remove = omap_dma_remove,
+ .driver = {
+ .name = "omap-dma-engine",
+ .owner = THIS_MODULE,
+ },
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &omap_dma_driver.driver) {
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned req = *(unsigned *)param;
+
+ return req == c->dma_sig;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
+
+static struct platform_device *pdev;
+
+static const struct platform_device_info omap_dma_dev_info = {
+ .name = "omap-dma-engine",
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int omap_dma_init(void)
+{
+ int rc = platform_driver_register(&omap_dma_driver);
+
+ if (rc == 0) {
+ pdev = platform_device_register_full(&omap_dma_dev_info);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&omap_dma_driver);
+ rc = PTR_ERR(pdev);
+ }
+ }
+ return rc;
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
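
(Editorial sketch of how a client then drives a transfer with this driver; the MMC, NAND and SPI conversions below follow this pattern. The FIFO address, burst size and error codes here are illustrative assumptions only.)

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Sketch: program a device-to-memory slave transfer on a channel
 * obtained via dma_request_channel()/omap_dma_filter_fn(). */
static int example_start_rx(struct dma_chan *chan, dma_addr_t dev_fifo,
			    struct scatterlist *sg, unsigned int sg_len,
			    dma_async_tx_callback done, void *done_arg)
{
	struct dma_slave_config cfg = {
		.src_addr	= dev_fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	/* Describe the device end of the transfer. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	tx->callback = done;
	tx->callback_param = done_arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* nothing runs until this */
	return 0;
}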
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5f1d2e670837..f5a73606217e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,6 +78,8 @@ struct sa11x0_dma_desc {
u32 ddar;
size_t size;
+ unsigned period;
+ bool cyclic;
unsigned sglen;
struct sa11x0_dma_sg sg[0];
@@ -178,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
return;
if (p->sg_load == txd->sglen) {
- struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+ if (!txd->cyclic) {
+ struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
- /*
- * We have reached the end of the current descriptor.
- * Peek at the next descriptor, and if compatible with
- * the current, start processing it.
- */
- if (txn && txn->ddar == txd->ddar) {
- txd = txn;
- sa11x0_dma_start_desc(p, txn);
+ /*
+ * We have reached the end of the current descriptor.
+ * Peek at the next descriptor, and if compatible with
+ * the current, start processing it.
+ */
+ if (txn && txn->ddar == txd->ddar) {
+ txd = txn;
+ sa11x0_dma_start_desc(p, txn);
+ } else {
+ p->txd_load = NULL;
+ return;
+ }
} else {
- p->txd_load = NULL;
- return;
+ /* Cyclic: reset back to beginning */
+ p->sg_load = 0;
}
}
@@ -224,13 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
struct sa11x0_dma_desc *txd = p->txd_done;
if (++p->sg_done == txd->sglen) {
- vchan_cookie_complete(&txd->vd);
+ if (!txd->cyclic) {
+ vchan_cookie_complete(&txd->vd);
- p->sg_done = 0;
- p->txd_done = p->txd_load;
+ p->sg_done = 0;
+ p->txd_done = p->txd_load;
- if (!p->txd_done)
- tasklet_schedule(&p->dev->task);
+ if (!p->txd_done)
+ tasklet_schedule(&p->dev->task);
+ } else {
+ if ((p->sg_done % txd->period) == 0)
+ vchan_cyclic_callback(&txd->vd);
+
+ /* Cyclic: reset back to beginning */
+ p->sg_done = 0;
+ }
}
sa11x0_dma_start_sg(p, c);
@@ -416,27 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- struct sa11x0_dma_desc *txd;
+ struct virt_dma_desc *vd;
unsigned long flags;
enum dma_status ret;
- size_t bytes = 0;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
if (ret == DMA_SUCCESS)
return ret;
+ if (!state)
+ return c->status;
+
spin_lock_irqsave(&c->vc.lock, flags);
p = c->phy;
- ret = c->status;
- if (p) {
- dma_addr_t addr = sa11x0_dma_pos(p);
- dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
+ } else if (!p) {
+ state->residue = 0;
+ } else {
+ struct sa11x0_dma_desc *txd;
+ size_t bytes = 0;
- txd = p->txd_done;
+ if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
+ txd = p->txd_done;
+ else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
+ txd = p->txd_load;
+ else
+ txd = NULL;
+
+ ret = c->status;
if (txd) {
+ dma_addr_t addr = sa11x0_dma_pos(p);
unsigned i;
+ dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
for (i = 0; i < txd->sglen; i++) {
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
i, txd->sg[i].addr, txd->sg[i].len);
@@ -459,18 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
bytes += txd->sg[i].len;
}
}
- if (txd != p->txd_load && p->txd_load)
- bytes += p->txd_load->size;
- }
- list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
- bytes += txd->size;
+ state->residue = bytes;
}
spin_unlock_irqrestore(&c->vc.lock, flags);
- if (state)
- state->residue = bytes;
-
- dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
return ret;
}
@@ -584,6 +612,65 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+ enum dma_transfer_direction dir, void *context)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_desc *txd;
+ unsigned i, j, k, sglen, sgperiod;
+
+ /* SA11x0 channels can only operate in their native direction */
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+ &c->vc, c->ddar, dir);
+ return NULL;
+ }
+
+ sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
+ sglen = size * sgperiod / period;
+
+ /* Do not allow zero-sized txds */
+ if (sglen == 0)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+ if (!txd) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
+ return NULL;
+ }
+
+ for (i = k = 0; i < size / period; i++) {
+ size_t tlen, len = period;
+
+ for (j = 0; j < sgperiod; j++, k++) {
+ tlen = len;
+
+ if (tlen > DMA_MAX_SIZE) {
+ unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
+ tlen = (tlen / mult) & ~DMA_ALIGN;
+ }
+
+ txd->sg[k].addr = addr;
+ txd->sg[k].len = tlen;
+ addr += tlen;
+ len -= tlen;
+ }
+
+ WARN_ON(len != 0);
+ }
+
+ WARN_ON(k != sglen);
+
+ txd->ddar = c->ddar;
+ txd->size = size;
+ txd->sglen = sglen;
+ txd->cyclic = 1;
+ txd->period = sgperiod;
+
+ return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
@@ -854,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
}
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
if (ret) {
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 3e8dcf8d2e05..50e08f03aa65 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
@@ -128,6 +130,10 @@ struct mmc_omap_host {
unsigned char id; /* 16xx chips have 2 MMC blocks */
struct clk * iclk;
struct clk * fclk;
+ struct dma_chan *dma_rx;
+ u32 dma_rx_burst;
+ struct dma_chan *dma_tx;
+ u32 dma_tx_burst;
struct resource *mem_res;
void __iomem *virt_base;
unsigned int phys_base;
@@ -153,12 +159,8 @@ struct mmc_omap_host {
unsigned use_dma:1;
unsigned brs_received:1, dma_done:1;
- unsigned dma_is_read:1;
unsigned dma_in_use:1;
- int dma_ch;
spinlock_t dma_lock;
- struct timer_list dma_timer;
- unsigned dma_len;
struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
struct mmc_omap_slot *current_slot;
@@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
int abort)
{
enum dma_data_direction dma_data_dir;
+ struct device *dev = mmc_dev(host->mmc);
+ struct dma_chan *c;
- BUG_ON(host->dma_ch < 0);
- if (data->error)
- omap_stop_dma(host->dma_ch);
- /* Release DMA channel lazily */
- mod_timer(&host->dma_timer, jiffies + HZ);
- if (data->flags & MMC_DATA_WRITE)
+ if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
- else
+ c = host->dma_tx;
+ } else {
dma_data_dir = DMA_FROM_DEVICE;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
- dma_data_dir);
+ c = host->dma_rx;
+ }
+ if (c) {
+ if (data->error) {
+ dmaengine_terminate_all(c);
+ /* Claim nothing transferred on error... */
+ data->bytes_xfered = 0;
+ }
+ dev = c->device->dev;
+ }
+ dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}
static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
}
static void
-mmc_omap_dma_timer(unsigned long data)
-{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
- BUG_ON(host->dma_ch < 0);
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
-}
-
-static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
unsigned long flags;
@@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param)
jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
+static void mmc_omap_dma_callback(void *priv)
{
- int dma_ch = host->dma_ch;
- unsigned long data_addr;
- u16 buf, frame;
- u32 count;
- struct scatterlist *sg = &data->sg[host->sg_idx];
- int src_port = 0;
- int dst_port = 0;
- int sync_dev = 0;
-
- data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- frame = data->blksz;
- count = sg_dma_len(sg);
-
- if ((data->blocks == 1) && (count > data->blksz))
- count = frame;
-
- host->dma_len = count;
-
- /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
- * Use 16 or 32 word frames when the blocksize is at least that large.
- * Blocksize is usually 512 bytes; but not for some SD reads.
- */
- if (cpu_is_omap15xx() && frame > 32)
- frame = 32;
- else if (frame > 64)
- frame = 64;
- count /= frame;
- frame >>= 1;
-
- if (!(data->flags & MMC_DATA_WRITE)) {
- buf = 0x800f | ((frame - 1) << 8);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_TIPB;
- dst_port = OMAP_DMA_PORT_EMIFF;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_RX;
-
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_dest_data_pack(dma_ch, 1);
- omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- } else {
- buf = 0x0f80 | ((frame - 1) << 0);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_EMIFF;
- dst_port = OMAP_DMA_PORT_TIPB;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_TX;
-
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_src_data_pack(dma_ch, 1);
- omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- }
+ struct mmc_omap_host *host = priv;
+ struct mmc_data *data = host->data;
- /* Max limit for DMA frame count is 0xffff */
- BUG_ON(count > 0xffff);
+ /* If we got to the end of DMA, assume everything went well */
+ data->bytes_xfered += data->blocks * data->blksz;
- OMAP_MMC_WRITE(host, BUF, buf);
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
- frame, count, OMAP_DMA_SYNC_FRAME,
- sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
-{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
- struct mmc_data *mmcdat = host->data;
-
- if (unlikely(host->dma_ch < 0)) {
- dev_err(mmc_dev(host->mmc),
- "DMA callback while DMA not enabled\n");
- return;
- }
- /* FIXME: We really should do something to _handle_ the errors */
- if (ch_status & OMAP1_DMA_TOUT_IRQ) {
- dev_err(mmc_dev(host->mmc),"DMA timeout\n");
- return;
- }
- if (ch_status & OMAP_DMA_DROP_IRQ) {
- dev_err(mmc_dev(host->mmc), "DMA sync error\n");
- return;
- }
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- return;
- }
- mmcdat->bytes_xfered += host->dma_len;
- host->sg_idx++;
- if (host->sg_idx < host->sg_len) {
- mmc_omap_prepare_dma(host, host->data);
- omap_start_dma(host->dma_ch);
- } else
- mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
- const char *dma_dev_name;
- int sync_dev, dma_ch, is_read, r;
-
- is_read = !(data->flags & MMC_DATA_WRITE);
- del_timer_sync(&host->dma_timer);
- if (host->dma_ch >= 0) {
- if (is_read == host->dma_is_read)
- return 0;
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- }
-
- if (is_read) {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_RX;
- dma_dev_name = "MMC1 read";
- } else {
- sync_dev = OMAP_DMA_MMC2_RX;
- dma_dev_name = "MMC2 read";
- }
- } else {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_TX;
- dma_dev_name = "MMC1 write";
- } else {
- sync_dev = OMAP_DMA_MMC2_TX;
- dma_dev_name = "MMC2 write";
- }
- }
- r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
- host, &dma_ch);
- if (r != 0) {
- dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
- return r;
- }
- host->dma_ch = dma_ch;
- host->dma_is_read = is_read;
-
- return 0;
+ mmc_omap_dma_done(host, data);
}
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
host->sg_idx = 0;
if (use_dma) {
- if (mmc_omap_get_dma_channel(host, data) == 0) {
- enum dma_data_direction dma_data_dir;
-
- if (data->flags & MMC_DATA_WRITE)
- dma_data_dir = DMA_TO_DEVICE;
- else
- dma_data_dir = DMA_FROM_DEVICE;
-
- host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- sg_len, dma_data_dir);
- host->total_bytes_left = 0;
- mmc_omap_prepare_dma(host, req->data);
- host->brs_received = 0;
- host->dma_done = 0;
- host->dma_in_use = 1;
- } else
- use_dma = 0;
+ enum dma_data_direction dma_data_dir;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *c;
+ u32 burst, *bp;
+ u16 buf;
+
+ /*
+ * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+ * and 24xx. Use 16 or 32 word frames when the
+ * blocksize is at least that large. Blocksize is
+ * usually 512 bytes; but not for some SD reads.
+ */
+ burst = cpu_is_omap15xx() ? 32 : 64;
+ if (burst > data->blksz)
+ burst = data->blksz;
+
+ burst >>= 1;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ c = host->dma_tx;
+ bp = &host->dma_tx_burst;
+ buf = 0x0f80 | (burst - 1) << 0;
+ dma_data_dir = DMA_TO_DEVICE;
+ } else {
+ c = host->dma_rx;
+ bp = &host->dma_rx_burst;
+ buf = 0x800f | (burst - 1) << 8;
+ dma_data_dir = DMA_FROM_DEVICE;
+ }
+
+ if (!c)
+ goto use_pio;
+
+ /* Only reconfigure if we have a different burst size */
+ if (*bp != burst) {
+ struct dma_slave_config cfg;
+
+ cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
+
+ if (dmaengine_slave_config(c, &cfg))
+ goto use_pio;
+
+ *bp = burst;
+ }
+
+ host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+ dma_data_dir);
+ if (host->sg_len == 0)
+ goto use_pio;
+
+ tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto use_pio;
+
+ OMAP_MMC_WRITE(host, BUF, buf);
+
+ tx->callback = mmc_omap_dma_callback;
+ tx->callback_param = host;
+ dmaengine_submit(tx);
+ host->brs_received = 0;
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+ return;
}
+ use_pio:
/* Revert to PIO? */
- if (!use_dma) {
- OMAP_MMC_WRITE(host, BUF, 0x1f1f);
- host->total_bytes_left = data->blocks * block_size;
- host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
- host->dma_in_use = 0;
- }
+ OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+ host->total_bytes_left = data->blocks * block_size;
+ host->sg_len = sg_len;
+ mmc_omap_sg_to_buf(host);
+ host->dma_in_use = 0;
}
static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
/* only touch fifo AFTER the controller readies it */
mmc_omap_prepare_data(host, req);
mmc_omap_start_command(host, req->cmd);
- if (host->dma_in_use)
- omap_start_dma(host->dma_ch);
+ if (host->dma_in_use) {
+ struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+ host->dma_tx : host->dma_rx;
+
+ dma_async_issue_pending(c);
+ }
}
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
struct resource *res;
+ dma_cap_mask_t mask;
+ unsigned sig;
int i, ret = 0;
int irq;
@@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
spin_lock_init(&host->dma_lock);
- setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
spin_lock_init(&host->slot_lock);
init_waitqueue_head(&host->slot_wq);
@@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
host->id = pdev->id;
host->mem_res = res;
host->irq = irq;
-
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
- host->dma_ch = -1;
-
host->irq = irq;
host->phys_base = host->mem_res->start;
host->virt_base = ioremap(res->start, resource_size(res));
@@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx_burst = -1;
+ host->dma_rx_burst = -1;
+
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
+ host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_tx) {
+ dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_tx)
+ dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+#endif
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
+ host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_rx) {
+ dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_rx)
+ dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+#endif
+
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
if (ret)
- goto err_free_fclk;
+ goto err_free_dma;
if (pdata->init != NULL) {
ret = pdata->init(&pdev->dev);
@@ -1510,7 +1457,11 @@ err_plat_cleanup:
pdata->cleanup(&pdev->dev);
err_free_irq:
free_irq(host->irq, host);
-err_free_fclk:
+err_free_dma:
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
clk_put(host->fclk);
err_free_iclk:
clk_disable(host->iclk);
@@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
clk_disable(host->iclk);
clk_put(host->iclk);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
+
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 389a3eedfc24..823d21cb87c0 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
+#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -29,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -37,7 +39,6 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
-#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
#include <plat/mmc.h>
@@ -166,7 +167,8 @@ struct omap_hsmmc_host {
int suspended;
int irq;
int use_dma, dma_ch;
- int dma_line_tx, dma_line_rx;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
int slot_id;
int response_busy;
int context_loss;
@@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
return DMA_FROM_DEVICE;
}
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
@@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
- host->data->sg_len,
+ struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(chan->device->dev,
+ host->data->sg, host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(dma_ch);
+
host->data->host_cookie = 0;
}
host->data = NULL;
@@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
- struct mmc_data *data)
-{
- int sync_dev;
-
- if (data->flags & MMC_DATA_WRITE)
- sync_dev = host->dma_line_tx;
- else
- sync_dev = host->dma_line_rx;
- return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
- struct mmc_data *data,
- struct scatterlist *sgl)
+static void omap_hsmmc_dma_callback(void *param)
{
- int blksz, nblk, dma_ch;
-
- dma_ch = host->dma_ch;
- if (data->flags & MMC_DATA_WRITE) {
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- } else {
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- }
-
- blksz = host->data->blksz;
- nblk = sg_dma_len(sgl) / blksz;
-
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
- blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- omap_hsmmc_get_dma_sync_dev(host, data),
- !(data->flags & MMC_DATA_WRITE));
-
- omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
-{
- struct omap_hsmmc_host *host = cb_data;
+ struct omap_hsmmc_host *host = param;
+ struct dma_chan *chan;
struct mmc_data *data;
- int dma_ch, req_in_progress;
- unsigned long flags;
+ int req_in_progress;
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
- ch_status);
- return;
- }
-
- spin_lock_irqsave(&host->irq_lock, flags);
+ spin_lock_irq(&host->irq_lock);
if (host->dma_ch < 0) {
- spin_unlock_irqrestore(&host->irq_lock, flags);
+ spin_unlock_irq(&host->irq_lock);
return;
}
data = host->mrq->data;
- host->dma_sg_idx++;
- if (host->dma_sg_idx < host->dma_len) {
- /* Fire up the next transfer. */
- omap_hsmmc_config_dma_params(host, data,
- data->sg + host->dma_sg_idx);
- spin_unlock_irqrestore(&host->irq_lock, flags);
- return;
- }
-
+ chan = omap_hsmmc_get_dma_chan(host, data);
if (!data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
- dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock_irqrestore(&host->irq_lock, flags);
-
- omap_free_dma(dma_ch);
+ spin_unlock_irq(&host->irq_lock);
/* If DMA has finished after TC, complete the request */
if (!req_in_progress) {
@@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_data *data,
- struct omap_hsmmc_next *next)
+ struct omap_hsmmc_next *next,
+ struct dma_chan *chan)
{
int dma_len;
@@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
/* Check if next job is already prepared */
if (next ||
(!next && data->host_cookie != host->next_data.cookie)) {
- dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
+ dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
} else {
@@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- int dma_ch = 0, ret = 0, i;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor *tx;
+ int ret = 0, i;
struct mmc_data *data = req->data;
+ struct dma_chan *chan;
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
BUG_ON(host->dma_ch != -1);
- ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
- "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
- if (ret != 0) {
- dev_err(mmc_dev(host->mmc),
- "%s: omap_request_dma() failed with %d\n",
- mmc_hostname(host->mmc), ret);
+ chan = omap_hsmmc_get_dma_chan(host, data);
+
+ cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
return ret;
- }
- ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
if (ret)
return ret;
- host->dma_ch = dma_ch;
- host->dma_sg_idx = 0;
+ tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ /* FIXME: cleanup */
+ return -1;
+ }
+
+ tx->callback = omap_hsmmc_dma_callback;
+ tx->callback_param = host;
+
+ /* Does not fail */
+ dmaengine_submit(tx);
- omap_hsmmc_config_dma_params(host, data, data->sg);
+ host->dma_ch = 1;
+
+ dma_async_issue_pending(chan);
return 0;
}
@@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct omap_hsmmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->use_dma) {
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (host->use_dma && data->host_cookie) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+
+ dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
@@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
return ;
}
- if (host->use_dma)
+ if (host->use_dma) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
- &host->next_data))
+ &host->next_data, c))
mrq->data->host_cookie = 0;
+ }
}
/*
@@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
const struct of_device_id *match;
+ dma_cap_mask_t mask;
+ unsigned tx_req, rx_req;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
@@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->pdata = pdata;
host->dev = &pdev->dev;
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
host->dma_ch = -1;
host->irq = irq;
host->slot_id = 0;
@@ -1933,14 +1909,31 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
goto err_irq;
}
- host->dma_line_tx = res->start;
+ tx_req = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
goto err_irq;
}
- host->dma_line_rx = res->start;
+ rx_req = res->start;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+ if (!host->rx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+ if (!host->tx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
/* Request IRQ for MMC operations */
ret = request_irq(host->irq, omap_hsmmc_irq, 0,
@@ -2019,6 +2012,10 @@ err_reg:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
@@ -2054,6 +2051,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
+
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b9..e9309b3659e7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -18,6 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -123,7 +125,7 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
struct completion comp;
- int dma_ch;
+ struct dma_chan *dma;
int gpmc_irq;
enum {
OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
* @data: pointer to completion data structure
*/
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_nand_dma_callback(void *data)
{
complete((struct completion *) data);
}
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
+ struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
- dma_addr_t dma_addr;
- int ret;
+ struct scatterlist sg;
unsigned long tim, limit;
-
- /* The fifo depth is 64 bytes max.
- * But configure the FIFO-threahold to 32 to get a sync at each frame
- * and frame length is 32 bytes.
- */
- int buf_len = len >> 6;
+ unsigned n;
+ int ret;
if (addr >= high_memory) {
struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
}
- dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
- if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
dev_err(&info->pdev->dev,
"Couldn't DMA map a %d byte buffer\n", len);
goto out_copy;
}
- if (is_write) {
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
- } else {
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
- }
- /* configure and start prefetch transfer */
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ /* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
init_completion(&info->comp);
-
- omap_start_dma(info->dma_ch);
+ dma_async_issue_pending(info->dma);
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
out_copy_unmap:
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_platform_data *pdata;
int err;
int i, offset;
+ dma_cap_mask_t mask;
+ unsigned sig;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_PREFETCH_DMA:
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- dev_err(&pdev->dev, "DMA request failed!\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = OMAP24XX_DMA_GPMC;
+ info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!info->dma) {
+ dev_err(&pdev->dev, "DMA engine request failed\n");
+ err = -ENXIO;
goto out_release_mem_region;
} else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
+ struct dma_slave_config cfg;
+ int rc;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ rc = dmaengine_slave_config(info->dma, &cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ rc);
+ goto out_release_mem_region;
+ }
info->nand.read_buf = omap_read_buf_dma_pref;
info->nand.write_buf = omap_write_buf_dma_pref;
}
@@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
return 0;
out_release_mem_region:
+ if (info->dma)
+ dma_release_channel(info->dma);
release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
kfree(info);
@@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev)
omap3_free_bch(&info->mtd);
platform_set_drvdata(pdev, NULL);
- if (info->dma_ch != -1)
- omap_free_dma(info->dma_ch);
+ if (info->dma)
+ dma_release_channel(info->dma);
if (info->gpmc_irq)
free_irq(info->gpmc_irq, info);
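
The NAND hunks above follow the usual dmaengine slave sequence: configure the channel, map the buffer, prepare a slave_sg descriptor with a completion callback, submit it, issue pending work, and wait. A minimal sketch of that flow for a single device write is shown below; it is an illustration of the pattern only, and the names example_slave_write, example_dma_done, chan, dev_addr, buf, len and done are hypothetical, not part of the patch.

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustration only: generic dmaengine slave flow, mirroring the NAND
 * conversion above.  All names here are hypothetical. */
static void example_dma_done(void *param)
{
	complete(param);		/* param is a struct completion * */
}

static int example_slave_write(struct dma_chan *chan, dma_addr_t dev_addr,
			       void *buf, size_t len, struct completion *done)
{
	struct dma_slave_config cfg = {
		.dst_addr	= dev_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	int ret;

	/* Tell the engine where the device FIFO lives and how to burst. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Map the buffer and wrap it in a one-entry scatterlist. */
	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	/* Completion is signalled from the descriptor callback. */
	tx->callback = example_dma_done;
	tx->callback_param = done;
	init_completion(done);

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	wait_for_completion(done);
	ret = 0;
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
	return ret;
}
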
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0c73dd4f43a0..26a99ae9a599 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -28,6 +28,8 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -39,7 +41,6 @@
#include <linux/spi/spi.h>
-#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>
@@ -93,8 +94,8 @@
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
- int dma_tx_channel;
- int dma_rx_channel;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
int dma_tx_sync_dev;
int dma_rx_sync_dev;
@@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
return 0;
}
+static void omap2_mcspi_rx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count, c;
- unsigned long base, tx_reg, rx_reg;
- int word_len, data_type, element_count;
+ unsigned int count;
+ int word_len, element_count;
int elements = 0;
u32 l;
u8 * rx;
const u8 * tx;
void __iomem *chstat_reg;
+ struct dma_slave_config cfg;
+ enum dma_slave_buswidth width;
+ unsigned es;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (cs->word_len <= 8) {
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ es = 1;
+ } else if (cs->word_len <= 16) {
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ es = 2;
+ } else {
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ es = 4;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+ cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+ cfg.src_addr_width = width;
+ cfg.dst_addr_width = width;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ if (xfer->tx_buf && mcspi_dma->dma_tx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->tx_dma;
+ sg_dma_len(&sg) = xfer->len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
+ if (xfer->rx_buf && mcspi_dma->dma_rx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ size_t len = xfer->len - es;
+
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO)
+ len -= es;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->rx_dma;
+ sg_dma_len(&sg) = len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
count = xfer->len;
- c = count;
word_len = cs->word_len;
- base = cs->phys;
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
if (word_len <= 8) {
- data_type = OMAP_DMA_DATA_TYPE_S8;
element_count = count;
} else if (word_len <= 16) {
- data_type = OMAP_DMA_DATA_TYPE_S16;
element_count = count >> 1;
} else /* word_len <= 32 */ {
- data_type = OMAP_DMA_DATA_TYPE_S32;
element_count = count >> 2;
}
if (tx != NULL) {
- omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
- data_type, element_count, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_tx_sync_dev, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- tx_reg, 0, 0);
-
- omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->tx_dma, 0, 0);
- }
-
- if (rx != NULL) {
- elements = element_count - 1;
- if (l & OMAP2_MCSPI_CHCONF_TURBO)
- elements--;
-
- omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
- data_type, elements, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_rx_sync_dev, 1);
-
- omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- rx_reg, 0, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->rx_dma, 0, 0);
- }
-
- if (tx != NULL) {
- omap_start_dma(mcspi_dma->dma_tx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
}
if (rx != NULL) {
- omap_start_dma(mcspi_dma->dma_rx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
}
@@ -406,7 +457,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
omap2_mcspi_set_enable(spi, 0);
+ elements = element_count - 1;
+
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ elements--;
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
@@ -723,64 +777,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
return 0;
}
-static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_rx_completion);
-
- /* We must disable the DMA RX request */
- omap2_mcspi_set_dma_req(spi, 1, 0);
-}
-
-static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_tx_completion);
-
- /* We must disable the DMA TX request */
- omap2_mcspi_set_dma_req(spi, 0, 0);
-}
-
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
struct spi_master *master = spi->master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ dma_cap_mask_t mask;
+ unsigned sig;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
- if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
- omap2_mcspi_dma_rx_callback, spi,
- &mcspi_dma->dma_rx_channel)) {
- dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = mcspi_dma->dma_rx_sync_dev;
+ mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_rx) {
+ dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
return -EAGAIN;
}
- if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
- omap2_mcspi_dma_tx_callback, spi,
- &mcspi_dma->dma_tx_channel)) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
- dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ sig = mcspi_dma->dma_tx_sync_dev;
+ mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_tx) {
+ dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
return -EAGAIN;
}
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
return 0;
}
@@ -812,8 +840,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
list_add_tail(&cs->node, &ctx->cs);
}
- if (mcspi_dma->dma_rx_channel == -1
- || mcspi_dma->dma_tx_channel == -1) {
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
if (ret < 0)
return ret;
@@ -848,13 +875,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
if (spi->chip_select < spi->master->num_chipselect) {
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- if (mcspi_dma->dma_rx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
}
- if (mcspi_dma->dma_tx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_tx_channel);
- mcspi_dma->dma_tx_channel = -1;
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
}
}
}
@@ -1174,7 +1201,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_rx_channel = -1;
mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
sprintf(dma_ch_name, "tx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -1185,7 +1211,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_tx_channel = -1;
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
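
One idiom in the McSPI hunks above deserves a note: xfer->tx_dma and xfer->rx_dma are bus addresses the driver has already mapped with dma_map_single(), so instead of calling dma_map_sg() it fills a one-entry scatterlist by hand with sg_dma_address()/sg_dma_len() and hands that to dmaengine_prep_slave_sg(). A minimal sketch of that idiom follows; the function name and parameters are hypothetical.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustration only: pass an already-mapped bus address to the slave-sg
 * API via a hand-built one-entry scatterlist. */
static struct dma_async_tx_descriptor *
example_prep_premapped(struct dma_chan *chan, dma_addr_t dma_addr,
		       size_t len, enum dma_transfer_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_addr;	/* already mapped elsewhere */
	sg_dma_len(&sg) = len;

	/* The prep call consumes the address/length immediately, which is
	 * why a stack scatterlist is enough here, as in the hunks above. */
	return dmaengine_prep_slave_sg(chan, &sg, 1, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
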
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 000000000000..eb475a8ea25b
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,22 @@
+/*
+ * OMAP DMA Engine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OMAP_DMA_H
+#define __LINUX_OMAP_DMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+bool omap_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
+{
+ return false;
+}
+#endif
+
+#endif
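
The new header is intentionally small: it only declares the filter function, and the !CONFIG_DMA_OMAP stub returns false so that dma_request_channel() finds no channel when the DMA engine driver is not built, while callers still compile. A minimal sketch of the intended call pattern is below; example_request_omap_chan is hypothetical, and sig stands for a hardware request line such as OMAP24XX_DMA_GPMC in the NAND hunk above.

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/* Illustration only: request a DMA_SLAVE channel bound to one OMAP DMA
 * request line. */
static struct dma_chan *example_request_omap_chan(unsigned int sig)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter runs synchronously inside dma_request_channel(), so
	 * passing the address of a local request-line number is safe.
	 * NULL is returned when no channel matches, e.g. when only the
	 * stub above is compiled in. */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}

The drivers converted in this series treat a NULL return as a hard error (-EAGAIN or -ENXIO), but the stub keeps them buildable even when CONFIG_DMA_OMAP is disabled.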