2 * Renesas SuperH DMA Engine support
4 * based on drivers/dma/fsldma.c
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * - The SuperH DMAC has no hardware DMA chain mode.
16 * - The maximum DMA transfer size is 16 MB.
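* - Transfers larger than the per-descriptor limit (SH_DMA_TCR_MAX + 1 bytes)
*   are split into multiple chunks by sh_dmae_prep_sg().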
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/dmaengine.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/sh_dma.h>
30 #include <linux/notifier.h>
31 #include <linux/kdebug.h>
32 #include <linux/spinlock.h>
33 #include <linux/rculist.h>
35 #include "dmaengine.h"
38 /* DMA descriptor control */
39 enum sh_dmae_desc_status {
43 DESC_COMPLETED, /* completed, have to call callback */
44 DESC_WAITING, /* callback called, waiting for ack / re-submit */
47 #define NR_DESCS_PER_CHANNEL 32
48 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
49 #define LOG2_DEFAULT_XFER_SIZE 2
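/*
 * This log2 size is also used as dma_device.copy_align in sh_dmae_probe(),
 * i.e. MEMCPY buffers are expected to be at least 2^2 = 4-byte aligned.
 */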
52 * Used for write-side mutual exclusion for the global device list,
53 * read-side synchronization by way of RCU, and per-controller data.
55 static DEFINE_SPINLOCK(sh_dmae_lock);
56 static LIST_HEAD(sh_dmae_devices);
58 /* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
59 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
61 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
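/*
 * Channel register access helpers: sh_dc->base is a u32 __iomem pointer, so
 * the byte offsets of the per-channel registers (SAR, DAR, TCR, CHCR) are
 * scaled down by sizeof(u32) before the pointer arithmetic.
 */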
63 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
65 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
68 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
70 return __raw_readl(sh_dc->base + reg / sizeof(u32));
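/*
 * DMAOR is a 16-bit register on most SuperH parts; controllers that set
 * dmaor_is_32bit in their platform data implement it as a 32-bit register,
 * hence the two access widths below.
 */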
73 static u16 dmaor_read(struct sh_dmae_device *shdev)
75 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
77 if (shdev->pdata->dmaor_is_32bit)
78 return __raw_readl(addr);
80 return __raw_readw(addr);
83 static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
85 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
87 if (shdev->pdata->dmaor_is_32bit)
88 __raw_writel(data, addr);
90 __raw_writew(data, addr);
93 static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
95 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
97 __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
100 static u32 chcr_read(struct sh_dmae_chan *sh_dc)
102 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
104 return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
108 * Reset DMA controller
110 * SH7780 has two DMAOR registers
112 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
114 unsigned short dmaor;
117 spin_lock_irqsave(&sh_dmae_lock, flags);
119 dmaor = dmaor_read(shdev);
120 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
122 spin_unlock_irqrestore(&sh_dmae_lock, flags);
125 static int sh_dmae_rst(struct sh_dmae_device *shdev)
127 unsigned short dmaor;
130 spin_lock_irqsave(&sh_dmae_lock, flags);
132 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
134 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
136 dmaor = dmaor_read(shdev);
138 spin_unlock_irqrestore(&sh_dmae_lock, flags);
140 if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
141 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
147 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
149 u32 chcr = chcr_read(sh_chan);
151 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
152 return true; /* working */
154 return false; /* waiting */
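/*
 * The CHCR transfer-size (TS) field is split into a low and a high part,
 * described by the ts_low_mask/shift and ts_high_mask/shift platform data.
 * The combined value indexes pdata->ts_shift[], which holds log2 of the
 * transfer unit size in bytes: with a hypothetical ts_shift[] of
 * { 0, 1, 2, 3, 4, 5 }, a TS index of 3 would select 2^3 = 8-byte units.
 * calc_xmit_shift() converts a CHCR value to that log2 size;
 * log2size_to_chcr() performs the reverse mapping.
 */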
157 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
159 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
160 struct sh_dmae_pdata *pdata = shdev->pdata;
161 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
162 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
164 if (cnt >= pdata->ts_shift_num)
167 return pdata->ts_shift[cnt];
170 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
172 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
173 struct sh_dmae_pdata *pdata = shdev->pdata;
176 for (i = 0; i < pdata->ts_shift_num; i++)
177 if (pdata->ts_shift[i] == l2size)
180 if (i == pdata->ts_shift_num)
183 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
184 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
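/*
 * Program one transfer into the channel registers: SAR/DAR take byte
 * addresses, while TCR counts transfer units, so the byte length in hw->tcr
 * is shifted right by the channel's current xmit_shift (log2 of the unit
 * size configured in CHCR).
 */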
187 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
189 sh_dmae_writel(sh_chan, hw->sar, SAR);
190 sh_dmae_writel(sh_chan, hw->dar, DAR);
191 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
194 static void dmae_start(struct sh_dmae_chan *sh_chan)
196 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
197 u32 chcr = chcr_read(sh_chan);
199 if (shdev->pdata->needs_tend_set)
200 sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
202 chcr |= CHCR_DE | shdev->chcr_ie_bit;
203 chcr_write(sh_chan, chcr & ~CHCR_TE);
206 static void dmae_halt(struct sh_dmae_chan *sh_chan)
208 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
209 u32 chcr = chcr_read(sh_chan);
211 chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
212 chcr_write(sh_chan, chcr);
215 static void dmae_init(struct sh_dmae_chan *sh_chan)
218 * Default configuration for dual address memory-memory transfer.
219 * 0x400 represents auto-request.
221 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
222 LOG2_DEFAULT_XFER_SIZE);
223 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
224 chcr_write(sh_chan, chcr);
227 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
229 /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
230 if (dmae_is_busy(sh_chan))
233 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
234 chcr_write(sh_chan, val);
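/*
 * DMARS programming: each 16-bit DMARS register holds the MID/RID request
 * source selectors of two channels, one per byte; chan_pdata->dmars_bit
 * selects this channel's half, and the read-modify-write below preserves
 * the neighbouring channel's value.
 */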
239 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
241 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
242 struct sh_dmae_pdata *pdata = shdev->pdata;
243 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
244 u16 __iomem *addr = shdev->dmars;
245 unsigned int shift = chan_pdata->dmars_bit;
247 if (dmae_is_busy(sh_chan))
253 /* In the case of a missing DMARS resource, use the first memory window */
255 addr = (u16 __iomem *)shdev->chan_reg;
256 addr += chan_pdata->dmars / sizeof(u16);
258 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
264 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
266 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
267 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
268 dma_async_tx_callback callback = tx->callback;
271 spin_lock_bh(&sh_chan->desc_lock);
273 cookie = dma_cookie_assign(tx);
275 /* Mark all chunks of this descriptor as submitted, move to the queue */
276 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
278 * All chunks are on the global ld_free, so we have to find
279 * the end of the chain ourselves
281 if (chunk != desc && (chunk->mark == DESC_IDLE ||
282 chunk->async_tx.cookie > 0 ||
283 chunk->async_tx.cookie == -EBUSY ||
284 &chunk->node == &sh_chan->ld_free))
286 chunk->mark = DESC_SUBMITTED;
287 /* Callback goes to the last chunk */
288 chunk->async_tx.callback = NULL;
289 chunk->cookie = cookie;
290 list_move_tail(&chunk->node, &sh_chan->ld_queue);
294 last->async_tx.callback = callback;
295 last->async_tx.callback_param = tx->callback_param;
297 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
298 tx->cookie, &last->async_tx, sh_chan->id,
299 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
301 spin_unlock_bh(&sh_chan->desc_lock);
306 /* Called with desc_lock held */
307 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
309 struct sh_desc *desc;
311 list_for_each_entry(desc, &sh_chan->ld_free, node)
312 if (desc->mark != DESC_PREPARED) {
313 BUG_ON(desc->mark != DESC_IDLE);
314 list_del(&desc->node);
321 static const struct sh_dmae_slave_config *sh_dmae_find_slave(
322 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
324 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
325 struct sh_dmae_pdata *pdata = shdev->pdata;
328 if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
331 for (i = 0; i < pdata->slave_num; i++)
332 if (pdata->slave[i].slave_id == param->slave_id)
333 return pdata->slave + i;
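/*
 * Slave channels are requested by clients through the dmaengine filter
 * mechanism, passing a struct sh_dmae_slave via chan->private. A minimal
 * sketch of typical client code (names and the slave ID are illustrative,
 * not taken from this driver):
 *
 *	static bool client_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	// points to a struct sh_dmae_slave
 *		return true;
 *	}
 *
 *	struct sh_dmae_slave param = { .slave_id = <SoC-specific ID> };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, client_filter, &param);
 *
 * sh_dmae_alloc_chan_resources() below then validates the slave ID and
 * programs DMARS and CHCR from the matching sh_dmae_slave_config.
 */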
338 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
340 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
341 struct sh_desc *desc;
342 struct sh_dmae_slave *param = chan->private;
345 pm_runtime_get_sync(sh_chan->dev);
348 * This relies on the guarantee from dmaengine that alloc_chan_resources
349 * never runs concurrently with itself or free_chan_resources.
352 const struct sh_dmae_slave_config *cfg;
354 cfg = sh_dmae_find_slave(sh_chan, param);
360 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
367 dmae_set_dmars(sh_chan, cfg->mid_rid);
368 dmae_set_chcr(sh_chan, cfg->chcr);
373 spin_lock_bh(&sh_chan->desc_lock);
374 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
375 spin_unlock_bh(&sh_chan->desc_lock);
376 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
378 spin_lock_bh(&sh_chan->desc_lock);
381 dma_async_tx_descriptor_init(&desc->async_tx,
383 desc->async_tx.tx_submit = sh_dmae_tx_submit;
384 desc->mark = DESC_IDLE;
386 spin_lock_bh(&sh_chan->desc_lock);
387 list_add(&desc->node, &sh_chan->ld_free);
388 sh_chan->descs_allocated++;
390 spin_unlock_bh(&sh_chan->desc_lock);
392 if (!sh_chan->descs_allocated) {
397 return sh_chan->descs_allocated;
401 clear_bit(param->slave_id, sh_dmae_slave_used);
404 pm_runtime_put(sh_chan->dev);
409 * sh_dma_free_chan_resources - Free all resources of the channel.
411 static void sh_dmae_free_chan_resources(struct dma_chan *chan)
413 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
414 struct sh_desc *desc, *_desc;
416 int descs = sh_chan->descs_allocated;
418 /* Protect against ISR */
419 spin_lock_irq(&sh_chan->desc_lock);
421 spin_unlock_irq(&sh_chan->desc_lock);
423 /* Now no new interrupts will occur */
425 /* Prepared and not submitted descriptors can still be on the queue */
426 if (!list_empty(&sh_chan->ld_queue))
427 sh_dmae_chan_ld_cleanup(sh_chan, true);
430 /* The caller is holding dma_list_mutex */
431 struct sh_dmae_slave *param = chan->private;
432 clear_bit(param->slave_id, sh_dmae_slave_used);
433 chan->private = NULL;
436 spin_lock_bh(&sh_chan->desc_lock);
438 list_splice_init(&sh_chan->ld_free, &list);
439 sh_chan->descs_allocated = 0;
441 spin_unlock_bh(&sh_chan->desc_lock);
444 pm_runtime_put(sh_chan->dev);
446 list_for_each_entry_safe(desc, _desc, &list, node)
451 * sh_dmae_add_desc - get, set up and return one transfer descriptor
452 * @sh_chan: DMA channel
453 * @flags: DMA transfer flags
454 * @dest: destination DMA address, incremented when direction equals
455 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
456 * @src: source DMA address, incremented when direction equals
457 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
458 * @len: DMA transfer length
459 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
460 * @direction: needed for slave DMA to decide which address to keep constant,
461 * equals DMA_BIDIRECTIONAL for MEMCPY
462 * Returns 0 or an error
463 * Locks: called with desc_lock held
465 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
466 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
467 struct sh_desc **first, enum dma_data_direction direction)
475 /* Allocate the link descriptor from the free list */
476 new = sh_dmae_get_desc(sh_chan);
478 dev_err(sh_chan->dev, "No free link descriptor available\n");
482 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
486 new->hw.tcr = copy_size;
490 new->async_tx.cookie = -EBUSY;
493 /* Other desc - invisible to the user */
494 new->async_tx.cookie = -EINVAL;
497 dev_dbg(sh_chan->dev,
498 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
499 copy_size, *len, *src, *dest, &new->async_tx,
500 new->async_tx.cookie, sh_chan->xmit_shift);
502 new->mark = DESC_PREPARED;
503 new->async_tx.flags = flags;
504 new->direction = direction;
507 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
509 if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
516 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
518 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
519 * converted to scatter-gather to guarantee consistent locking and correct
520 * list manipulation. For slave DMA, direction carries the usual meaning and,
521 * logically, the SG list is RAM and the addr variable contains the slave address,
522 * e.g., the FIFO I/O register. For MEMCPY, direction equals DMA_BIDIRECTIONAL
523 * and the SG list contains only one element, pointing at the source buffer.
525 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
526 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
527 enum dma_data_direction direction, unsigned long flags)
529 struct scatterlist *sg;
530 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
538 for_each_sg(sgl, sg, sg_len, i)
539 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
540 (SH_DMA_TCR_MAX + 1);
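/*
 * Worst-case chunk count: the rounding-up division above reserves room for
 * each SG element to be split into ceil(len / (SH_DMA_TCR_MAX + 1)) chunks,
 * since sh_dmae_add_desc() limits a single chunk to SH_DMA_TCR_MAX + 1 bytes.
 */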
542 /* Have to lock the whole loop to protect against concurrent release */
543 spin_lock_bh(&sh_chan->desc_lock);
547 * The first descriptor is what the user deals with in all API calls; its
548 * cookie is initially set to -EBUSY, and at tx-submit to a positive number.
550 * If more than one chunk is needed, further chunks have cookie = -EINVAL;
551 * the last chunk, if not equal to the first, has cookie = -ENOSPC.
552 * All chunks are linked onto the tx_list head with their .node heads
553 * only during this function, then they are immediately spliced
554 * back onto the free list as a chain.
556 for_each_sg(sgl, sg, sg_len, i) {
557 dma_addr_t sg_addr = sg_dma_address(sg);
558 size_t len = sg_dma_len(sg);
564 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
565 i, sg, len, (unsigned long long)sg_addr);
567 if (direction == DMA_FROM_DEVICE)
568 new = sh_dmae_add_desc(sh_chan, flags,
569 &sg_addr, addr, &len, &first,
572 new = sh_dmae_add_desc(sh_chan, flags,
573 addr, &sg_addr, &len, &first,
578 new->chunks = chunks--;
579 list_add_tail(&new->node, &tx_list);
584 new->async_tx.cookie = -ENOSPC;
586 /* Put them back on the free list so they don't get lost */
587 list_splice_tail(&tx_list, &sh_chan->ld_free);
589 spin_unlock_bh(&sh_chan->desc_lock);
591 return &first->async_tx;
594 list_for_each_entry(new, &tx_list, node)
595 new->mark = DESC_IDLE;
596 list_splice(&tx_list, &sh_chan->ld_free);
598 spin_unlock_bh(&sh_chan->desc_lock);
603 static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
604 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
605 size_t len, unsigned long flags)
607 struct sh_dmae_chan *sh_chan;
608 struct scatterlist sg;
613 sh_chan = to_sh_chan(chan);
615 sg_init_table(&sg, 1);
616 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
617 offset_in_page(dma_src));
618 sg_dma_address(&sg) = dma_src;
619 sg_dma_len(&sg) = len;
621 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
625 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
626 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
627 enum dma_data_direction direction, unsigned long flags)
629 struct sh_dmae_slave *param;
630 struct sh_dmae_chan *sh_chan;
631 dma_addr_t slave_addr;
636 sh_chan = to_sh_chan(chan);
637 param = chan->private;
639 /* Someone calling slave DMA on a public channel? */
640 if (!param || !sg_len) {
641 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
642 __func__, param, sg_len, param ? param->slave_id : -1);
646 slave_addr = param->config->addr;
649 * if (param != NULL), this is a successfully requested slave channel,
650 * therefore param->config != NULL too.
652 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
656 static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
659 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
661 /* Only supports DMA_TERMINATE_ALL */
662 if (cmd != DMA_TERMINATE_ALL)
668 spin_lock_bh(&sh_chan->desc_lock);
671 if (!list_empty(&sh_chan->ld_queue)) {
672 /* Record partial transfer */
673 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
674 struct sh_desc, node);
675 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
679 spin_unlock_bh(&sh_chan->desc_lock);
681 sh_dmae_chan_ld_cleanup(sh_chan, true);
686 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
688 struct sh_desc *desc, *_desc;
689 /* Is the "exposed" head of a chain acked? */
690 bool head_acked = false;
691 dma_cookie_t cookie = 0;
692 dma_async_tx_callback callback = NULL;
695 spin_lock_bh(&sh_chan->desc_lock);
696 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
697 struct dma_async_tx_descriptor *tx = &desc->async_tx;
699 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
700 BUG_ON(desc->mark != DESC_SUBMITTED &&
701 desc->mark != DESC_COMPLETED &&
702 desc->mark != DESC_WAITING);
705 * The queue is ordered, and we use this loop to (1) clean up all
706 * completed descriptors, and to (2) update descriptor flags of
707 * any chunks in a (partially) completed chain
709 if (!all && desc->mark == DESC_SUBMITTED &&
710 desc->cookie != cookie)
716 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
717 if (sh_chan->common.completed_cookie != desc->cookie - 1)
718 dev_dbg(sh_chan->dev,
719 "Completing cookie %d, expected %d\n",
721 sh_chan->common.completed_cookie + 1);
722 sh_chan->common.completed_cookie = desc->cookie;
725 /* Call callback on the last chunk */
726 if (desc->mark == DESC_COMPLETED && tx->callback) {
727 desc->mark = DESC_WAITING;
728 callback = tx->callback;
729 param = tx->callback_param;
730 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
731 tx->cookie, tx, sh_chan->id);
732 BUG_ON(desc->chunks != 1);
736 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
737 if (desc->mark == DESC_COMPLETED) {
738 BUG_ON(tx->cookie < 0);
739 desc->mark = DESC_WAITING;
741 head_acked = async_tx_test_ack(tx);
743 switch (desc->mark) {
745 desc->mark = DESC_WAITING;
749 async_tx_ack(&desc->async_tx);
753 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
756 if (((desc->mark == DESC_COMPLETED ||
757 desc->mark == DESC_WAITING) &&
758 async_tx_test_ack(&desc->async_tx)) || all) {
759 /* Remove from ld_queue list */
760 desc->mark = DESC_IDLE;
761 list_move(&desc->node, &sh_chan->ld_free);
765 if (all && !callback)
767 * Terminating and the loop completed normally: forgive
768 * uncompleted cookies
770 sh_chan->common.completed_cookie = sh_chan->common.cookie;
772 spin_unlock_bh(&sh_chan->desc_lock);
781 * sh_chan_ld_cleanup - Clean up link descriptors
783 * This function cleans up the ld_queue of the DMA channel.
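* __ld_cleanup() handles at most one completed descriptor's callback per
* call (callbacks must run with desc_lock released), so we keep calling it
* until it reports that nothing is left to do.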
785 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
787 while (__ld_cleanup(sh_chan, all))
791 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
793 struct sh_desc *desc;
795 spin_lock_bh(&sh_chan->desc_lock);
797 if (dmae_is_busy(sh_chan))
798 goto sh_chan_xfer_ld_queue_end;
800 /* Find the first descriptor that has not been transferred yet */
801 list_for_each_entry(desc, &sh_chan->ld_queue, node)
802 if (desc->mark == DESC_SUBMITTED) {
803 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
804 desc->async_tx.cookie, sh_chan->id,
805 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
806 /* Get the ld start address from ld_queue */
807 dmae_set_reg(sh_chan, &desc->hw);
812 sh_chan_xfer_ld_queue_end:
813 spin_unlock_bh(&sh_chan->desc_lock);
816 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
818 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
819 sh_chan_xfer_ld_queue(sh_chan);
822 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
824 struct dma_tx_state *txstate)
826 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
827 enum dma_status status;
829 sh_dmae_chan_ld_cleanup(sh_chan, false);
831 spin_lock_bh(&sh_chan->desc_lock);
833 status = dma_cookie_status(chan, cookie, txstate);
836 * If we don't find the cookie on the queue, it has been aborted and we have to report an error.
839 if (status != DMA_SUCCESS) {
840 struct sh_desc *desc;
842 list_for_each_entry(desc, &sh_chan->ld_queue, node)
843 if (desc->cookie == cookie) {
844 status = DMA_IN_PROGRESS;
849 spin_unlock_bh(&sh_chan->desc_lock);
854 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
856 irqreturn_t ret = IRQ_NONE;
857 struct sh_dmae_chan *sh_chan = data;
860 spin_lock(&sh_chan->desc_lock);
862 chcr = chcr_read(sh_chan);
864 if (chcr & CHCR_TE) {
869 tasklet_schedule(&sh_chan->tasklet);
872 spin_unlock(&sh_chan->desc_lock);
877 /* Called from error IRQ or NMI */
878 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
880 unsigned int handled = 0;
883 /* halt the dma controller */
884 sh_dmae_ctl_stop(shdev);
886 /* We cannot detect which channel caused the error, so we have to reset them all */
887 for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
888 struct sh_dmae_chan *sh_chan = shdev->chan[i];
889 struct sh_desc *desc;
895 spin_lock(&sh_chan->desc_lock);
897 /* Stop the channel */
900 list_splice_init(&sh_chan->ld_queue, &dl);
902 spin_unlock(&sh_chan->desc_lock);
905 list_for_each_entry(desc, &dl, node) {
906 struct dma_async_tx_descriptor *tx = &desc->async_tx;
907 desc->mark = DESC_IDLE;
909 tx->callback(tx->callback_param);
912 spin_lock(&sh_chan->desc_lock);
913 list_splice(&dl, &sh_chan->ld_free);
914 spin_unlock(&sh_chan->desc_lock);
924 static irqreturn_t sh_dmae_err(int irq, void *data)
926 struct sh_dmae_device *shdev = data;
928 if (!(dmaor_read(shdev) & DMAOR_AE))
935 static void dmae_do_tasklet(unsigned long data)
937 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
938 struct sh_desc *desc;
939 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
940 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
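/*
 * Identify the chunk that has just finished: a submitted descriptor is
 * considered complete when the current SAR (or DAR, for device-to-memory
 * transfers) equals that chunk's start address plus its length.
 */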
942 spin_lock(&sh_chan->desc_lock);
943 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
944 if (desc->mark == DESC_SUBMITTED &&
945 ((desc->direction == DMA_FROM_DEVICE &&
946 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
947 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
948 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
949 desc->async_tx.cookie, &desc->async_tx,
951 desc->mark = DESC_COMPLETED;
955 spin_unlock(&sh_chan->desc_lock);
958 sh_chan_xfer_ld_queue(sh_chan);
959 sh_dmae_chan_ld_cleanup(sh_chan, false);
962 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
964 /* Fast path out if NMIF is not asserted for this controller */
965 if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
968 return sh_dmae_reset(shdev);
971 static int sh_dmae_nmi_handler(struct notifier_block *self,
972 unsigned long cmd, void *data)
974 struct sh_dmae_device *shdev;
975 int ret = NOTIFY_DONE;
979 * Only concern ourselves with NMI events.
981 * Normally we would check the die chain value, but as this needs
982 * to be architecture independent, check for NMI context instead.
988 list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
990 * Only stop if one of the controllers has NMIF asserted,
991 * we do not want to interfere with regular address error
992 * handling or NMI events that don't concern the DMACs.
994 triggered = sh_dmae_nmi_notify(shdev);
995 if (triggered == true)
1003 static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
1004 .notifier_call = sh_dmae_nmi_handler,
1006 /* Run before NMI debug handler and KGDB */
1010 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1011 int irq, unsigned long flags)
1014 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
1015 struct platform_device *pdev = to_platform_device(shdev->common.dev);
1016 struct sh_dmae_chan *new_sh_chan;
1019 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
1021 dev_err(shdev->common.dev,
1022 "No free memory for allocating dma channels!\n");
1026 /* copy struct dma_device */
1027 new_sh_chan->common.device = &shdev->common;
1029 new_sh_chan->dev = shdev->common.dev;
1030 new_sh_chan->id = id;
1031 new_sh_chan->irq = irq;
1032 new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
1034 /* Init DMA tasklet */
1035 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
1036 (unsigned long)new_sh_chan);
1038 spin_lock_init(&new_sh_chan->desc_lock);
1040 /* Init the descriptor management lists */
1041 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
1042 INIT_LIST_HEAD(&new_sh_chan->ld_free);
1044 /* Add the channel to DMA device channel list */
1045 list_add_tail(&new_sh_chan->common.device_node,
1046 &shdev->common.channels);
1047 shdev->common.chancnt++;
1050 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1051 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
1053 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1054 "sh-dma%d", new_sh_chan->id);
1056 /* set up channel irq */
1057 err = request_irq(irq, &sh_dmae_interrupt, flags,
1058 new_sh_chan->dev_id, new_sh_chan);
1060 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
1061 "with return %d\n", id, err);
1065 shdev->chan[id] = new_sh_chan;
1069 /* remove from dmaengine device node */
1070 list_del(&new_sh_chan->common.device_node);
1075 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
1079 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
1080 if (shdev->chan[i]) {
1081 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1083 free_irq(sh_chan->irq, sh_chan);
1085 list_del(&sh_chan->common.device_node);
1087 shdev->chan[i] = NULL;
1090 shdev->common.chancnt = 0;
1093 static int __init sh_dmae_probe(struct platform_device *pdev)
1095 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1096 unsigned long irqflags = IRQF_DISABLED,
1097 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1098 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
1099 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
1100 struct sh_dmae_device *shdev;
1101 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
1103 /* get platform data */
1104 if (!pdata || !pdata->channel_num)
1107 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108 /* DMARS area is optional */
1109 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1112 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
1113 * the error IRQ, in which case it is the only IRQ in this resource:
1114 * start == end. If it is the only IRQ resource, all channels also use it.
1116 * 2. DMA channel IRQ resources can be specified one per resource or in
1117 * ranges (start != end)
1118 * 3. iff all events (channels and, optionally, error) on this
1119 * controller use the same IRQ, only one IRQ resource can be
1120 * specified, otherwise there must be one IRQ per channel, even if
1121 * some of them are equal
1122 * 4. if all IRQs on this controller are equal or if some specific IRQs
1123 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1124 * requested with the IRQF_SHARED flag
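* For example, a controller with a dedicated error IRQ and six channel IRQs
* would provide IRQ resources covering seven interrupts (individually or as
* ranges), whereas a controller with everything multiplexed onto a single
* line provides one IRQ resource, which is then requested with IRQF_SHARED
* for every channel.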
1126 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1127 if (!chan || !errirq_res)
1130 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1131 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1135 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1136 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1142 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1144 dev_err(&pdev->dev, "Not enough memory\n");
1148 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1149 if (!shdev->chan_reg)
1152 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1158 shdev->pdata = pdata;
1160 if (pdata->chcr_offset)
1161 shdev->chcr_offset = pdata->chcr_offset;
1163 shdev->chcr_offset = CHCR;
1165 if (pdata->chcr_ie_bit)
1166 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1168 shdev->chcr_ie_bit = CHCR_IE;
1170 platform_set_drvdata(pdev, shdev);
1172 pm_runtime_enable(&pdev->dev);
1173 pm_runtime_get_sync(&pdev->dev);
1175 spin_lock_irq(&sh_dmae_lock);
1176 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
1177 spin_unlock_irq(&sh_dmae_lock);
1179 /* reset dma controller - only needed as a test */
1180 err = sh_dmae_rst(shdev);
1184 INIT_LIST_HEAD(&shdev->common.channels);
1186 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1187 if (pdata->slave && pdata->slave_num)
1188 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1190 shdev->common.device_alloc_chan_resources
1191 = sh_dmae_alloc_chan_resources;
1192 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1193 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
1194 shdev->common.device_tx_status = sh_dmae_tx_status;
1195 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
1197 /* Fields compulsory for DMA_SLAVE */
1198 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1199 shdev->common.device_control = sh_dmae_control;
1201 shdev->common.dev = &pdev->dev;
1202 /* The default transfer size of 4 bytes requires 4-byte alignment */
1203 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1205 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1206 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1209 chanirq_res = errirq_res;
1213 if (chanirq_res == errirq_res ||
1214 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
1215 irqflags = IRQF_SHARED;
1217 errirq = errirq_res->start;
1219 err = request_irq(errirq, sh_dmae_err, irqflags,
1220 "DMAC Address Error", shdev);
1223 "DMA failed requesting irq #%d, error %d\n",
1229 chanirq_res = errirq_res;
1230 #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
1232 if (chanirq_res->start == chanirq_res->end &&
1233 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1234 /* Special case - all multiplexed */
1235 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1236 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1237 chan_irq[irq_cnt] = chanirq_res->start;
1238 chan_flag[irq_cnt] = IRQF_SHARED;
1246 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1247 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1252 if ((errirq_res->flags & IORESOURCE_BITS) ==
1253 IORESOURCE_IRQ_SHAREABLE)
1254 chan_flag[irq_cnt] = IRQF_SHARED;
1256 chan_flag[irq_cnt] = IRQF_DISABLED;
1258 "Found IRQ %d for channel %d\n",
1260 chan_irq[irq_cnt++] = i;
1263 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1266 chanirq_res = platform_get_resource(pdev,
1267 IORESOURCE_IRQ, ++irqres);
1268 } while (irq_cnt < pdata->channel_num && chanirq_res);
1271 /* Create DMA Channel */
1272 for (i = 0; i < irq_cnt; i++) {
1273 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1275 goto chan_probe_err;
1279 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1280 "channels when a maximum of %d are supported.\n",
1281 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1283 pm_runtime_put(&pdev->dev);
1285 dma_async_device_register(&shdev->common);
1290 sh_dmae_chan_remove(shdev);
1292 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1293 free_irq(errirq, shdev);
1297 spin_lock_irq(&sh_dmae_lock);
1298 list_del_rcu(&shdev->node);
1299 spin_unlock_irq(&sh_dmae_lock);
1301 pm_runtime_put(&pdev->dev);
1302 pm_runtime_disable(&pdev->dev);
1305 iounmap(shdev->dmars);
1307 platform_set_drvdata(pdev, NULL);
1309 iounmap(shdev->chan_reg);
1315 release_mem_region(dmars->start, resource_size(dmars));
1317 release_mem_region(chan->start, resource_size(chan));
1322 static int __exit sh_dmae_remove(struct platform_device *pdev)
1324 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1325 struct resource *res;
1326 int errirq = platform_get_irq(pdev, 0);
1328 dma_async_device_unregister(&shdev->common);
1331 free_irq(errirq, shdev);
1333 spin_lock_irq(&sh_dmae_lock);
1334 list_del_rcu(&shdev->node);
1335 spin_unlock_irq(&sh_dmae_lock);
1337 /* Remove the channel data */
1338 sh_dmae_chan_remove(shdev);
1340 pm_runtime_disable(&pdev->dev);
1343 iounmap(shdev->dmars);
1344 iounmap(shdev->chan_reg);
1346 platform_set_drvdata(pdev, NULL);
1351 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1353 release_mem_region(res->start, resource_size(res));
1354 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1356 release_mem_region(res->start, resource_size(res));
1361 static void sh_dmae_shutdown(struct platform_device *pdev)
1363 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1364 sh_dmae_ctl_stop(shdev);
1367 static int sh_dmae_runtime_suspend(struct device *dev)
1372 static int sh_dmae_runtime_resume(struct device *dev)
1374 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1376 return sh_dmae_rst(shdev);
1380 static int sh_dmae_suspend(struct device *dev)
1382 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1385 for (i = 0; i < shdev->pdata->channel_num; i++) {
1386 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1387 if (sh_chan->descs_allocated)
1388 sh_chan->pm_error = pm_runtime_put_sync(dev);
1394 static int sh_dmae_resume(struct device *dev)
1396 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1399 for (i = 0; i < shdev->pdata->channel_num; i++) {
1400 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1401 struct sh_dmae_slave *param = sh_chan->common.private;
1403 if (!sh_chan->descs_allocated)
1406 if (!sh_chan->pm_error)
1407 pm_runtime_get_sync(dev);
1410 const struct sh_dmae_slave_config *cfg = param->config;
1411 dmae_set_dmars(sh_chan, cfg->mid_rid);
1412 dmae_set_chcr(sh_chan, cfg->chcr);
1421 #define sh_dmae_suspend NULL
1422 #define sh_dmae_resume NULL
1425 const struct dev_pm_ops sh_dmae_pm = {
1426 .suspend = sh_dmae_suspend,
1427 .resume = sh_dmae_resume,
1428 .runtime_suspend = sh_dmae_runtime_suspend,
1429 .runtime_resume = sh_dmae_runtime_resume,
1432 static struct platform_driver sh_dmae_driver = {
1433 .remove = __exit_p(sh_dmae_remove),
1434 .shutdown = sh_dmae_shutdown,
1436 .owner = THIS_MODULE,
1437 .name = "sh-dma-engine",
1442 static int __init sh_dmae_init(void)
1444 /* Wire up NMI handling */
1445 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1449 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1451 module_init(sh_dmae_init);
1453 static void __exit sh_dmae_exit(void)
1455 platform_driver_unregister(&sh_dmae_driver);
1457 unregister_die_notifier(&sh_dmae_nmi_notifier);
1459 module_exit(sh_dmae_exit);
1461 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1462 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1463 MODULE_LICENSE("GPL");
1464 MODULE_ALIAS("platform:sh-dma-engine");