/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
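
/*
 * For illustration only (derived from the macro above, not from the
 * databook): a memcpy channel has chan->private == NULL, so
 * DWC_DEFAULT_CTLLO(NULL) evaluates to
 *
 *	DWC_CTLL_DST_MSIZE(DW_DMA_MSIZE_16)
 *	| DWC_CTLL_SRC_MSIZE(DW_DMA_MSIZE_16)
 *	| DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *	| DWC_CTLL_DMS(0) | DWC_CTLL_SMS(1)
 *
 * i.e. 16-transfer bursts on both sides, block chaining via LLP enabled,
 * and AHB masters 0/1 for the destination/source interfaces.
 */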
/*
 * This is the maximum hardware block size; it is configuration-dependent
 * and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/
/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->chan.completed_cookie = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */
inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else
		return -ENXIO;

	return 0;
}
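
/*
 * Example (a sketch using only the generic dmaengine API, not code from
 * this driver): a slave client reaches the three commands above through
 * the channel's device_control hook, e.g.
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	...
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 *	...
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 *
 * Note that DMA_TERMINATE_ALL completes descriptors with
 * callback_required == false, so no client callbacks are invoked for
 * the flushed descriptors.
 */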
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
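
/*
 * Typical client usage of the memcpy path (a sketch, error handling
 * omitted; done_fn and done_arg are hypothetical client-side names):
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	txd->callback_param = done_arg;
 *	cookie = txd->tx_submit(txd);
 *	chan->device->device_issue_pending(chan);
 *
 * tx_submit() lands in dwc_tx_submit() above, issue_pending() in
 * dwc_issue_pending(), and completion can be polled with
 * device_tx_status() or observed via the callback.
 */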
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_dma_slave *dws;
	int i;
	u32 cfghi;
	u32 cfglo;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	chan->completed_cookie = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */
/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	struct dw_dma_slave *dws = chan->private;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
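
/*
 * Typical call sequence for the cyclic API above (a sketch; a real user,
 * e.g. an audio driver, adds its own error handling, and period_done/arg
 * are hypothetical client-side names):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *			DMA_TO_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = period_done;
 *	cdesc->period_callback_param = arg;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */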
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.chan_id = i;
		dwc->chan.cookie = dwc->chan.completed_cookie = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = 7 - i;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");