dmaengine: shdma: add runtime- and system-level power management
drivers/dma/shdma.c
1 /*
2  * Renesas SuperH DMA Engine support
3  *
4  * base is drivers/dma/flsdma.c
5  *
6  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
9  *
10  * This is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * - The SuperH DMAC has no hardware DMA-chain mode.
16  * - The maximum DMA transfer size is 16 MB.
17  *
18  */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/dmaengine.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/sh_dma.h>
30 #include <linux/notifier.h>
31 #include <linux/kdebug.h>
32 #include <linux/spinlock.h>
33 #include <linux/rculist.h>
34 #include "shdma.h"
35
36 /* DMA descriptor control */
37 enum sh_dmae_desc_status {
38         DESC_IDLE,
39         DESC_PREPARED,
40         DESC_SUBMITTED,
41         DESC_COMPLETED, /* completed, have to call callback */
42         DESC_WAITING,   /* callback called, waiting for ack / re-submit */
43 };
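/*
 * Descriptor life cycle, as implemented below: sh_dmae_get_desc() hands out
 * DESC_IDLE descriptors from ld_free, sh_dmae_add_desc() marks them
 * DESC_PREPARED, sh_dmae_tx_submit() moves them to ld_queue as
 * DESC_SUBMITTED, the completion tasklet marks the finished chunk
 * DESC_COMPLETED, and __ld_cleanup() runs the callback (DESC_WAITING) before
 * returning the descriptor to ld_free as DESC_IDLE.
 */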
44
45 #define NR_DESCS_PER_CHANNEL 32
46 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
47 #define LOG2_DEFAULT_XFER_SIZE  2
48
49 /*
50  * Used for write-side mutual exclusion for the global device list,
51  * read-side synchronization by way of RCU, and per-controller data.
52  */
53 static DEFINE_SPINLOCK(sh_dmae_lock);
54 static LIST_HEAD(sh_dmae_devices);
55
56 /* A bitmask with enough bits for all values of enum sh_dmae_slave_chan_id */
57 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
58
59 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
60
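/*
 * Register access helpers. The channel and controller register bases are
 * pointers to 32-bit words, while the offsets supplied by platform data and
 * the register defines (SAR, DAR, TCR, CHCR, DMAOR) are byte offsets, hence
 * the division by sizeof(u32) in the pointer arithmetic below.
 */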
61 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
62 {
63         __raw_writel(data, sh_dc->base + reg / sizeof(u32));
64 }
65
66 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
67 {
68         return __raw_readl(sh_dc->base + reg / sizeof(u32));
69 }
70
71 static u16 dmaor_read(struct sh_dmae_device *shdev)
72 {
73         return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
74 }
75
76 static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
77 {
78         __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
79 }
80
81 /*
82  * Reset DMA controller
83  *
84  * SH7780 has two DMAOR registers
85  */
86 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
87 {
88         unsigned short dmaor;
89         unsigned long flags;
90
91         spin_lock_irqsave(&sh_dmae_lock, flags);
92
93         dmaor = dmaor_read(shdev);
94         dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
95
96         spin_unlock_irqrestore(&sh_dmae_lock, flags);
97 }
98
99 static int sh_dmae_rst(struct sh_dmae_device *shdev)
100 {
101         unsigned short dmaor;
102         unsigned long flags;
103
104         spin_lock_irqsave(&sh_dmae_lock, flags);
105
106         dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
107
108         dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
109
110         dmaor = dmaor_read(shdev);
111
112         spin_unlock_irqrestore(&sh_dmae_lock, flags);
113
114         if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
115                 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
116                 return -EIO;
117         }
118         return 0;
119 }
120
121 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
122 {
123         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
124
125         if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
126                 return true; /* working */
127
128         return false; /* waiting */
129 }
130
131 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
132 {
133         struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
134                                                 struct sh_dmae_device, common);
135         struct sh_dmae_pdata *pdata = shdev->pdata;
136         int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
137                 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
138
139         if (cnt >= pdata->ts_shift_num)
140                 cnt = 0;
141
142         return pdata->ts_shift[cnt];
143 }
144
145 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
146 {
147         struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
148                                                 struct sh_dmae_device, common);
149         struct sh_dmae_pdata *pdata = shdev->pdata;
150         int i;
151
152         for (i = 0; i < pdata->ts_shift_num; i++)
153                 if (pdata->ts_shift[i] == l2size)
154                         break;
155
156         if (i == pdata->ts_shift_num)
157                 i = 0;
158
159         return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
160                 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
161 }
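/*
 * Worked example with made-up platform data (ts_low_shift = 3,
 * ts_low_mask = 0x18, ts_high_mask = 0, ts_shift[] = { 3, 4, 5 }):
 * log2size_to_chcr(sh_chan, 4) finds index 1 and returns 1 << 3 = 0x08,
 * while calc_xmit_shift() on a CHCR carrying those TS bits extracts index 1
 * again and returns ts_shift[1] = 4, i.e. transfers in 2^4 = 16-byte units.
 */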
162
163 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
164 {
165         sh_dmae_writel(sh_chan, hw->sar, SAR);
166         sh_dmae_writel(sh_chan, hw->dar, DAR);
167         sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
168 }
169
170 static void dmae_start(struct sh_dmae_chan *sh_chan)
171 {
172         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
173
174         chcr |= CHCR_DE | CHCR_IE;
175         sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
176 }
177
178 static void dmae_halt(struct sh_dmae_chan *sh_chan)
179 {
180         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
181
182         chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
183         sh_dmae_writel(sh_chan, chcr, CHCR);
184 }
185
186 static void dmae_init(struct sh_dmae_chan *sh_chan)
187 {
188         /*
189          * Default configuration for dual address memory-memory transfer.
190          * 0x400 represents auto-request.
191          */
192         u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
193                                                    LOG2_DEFAULT_XFER_SIZE);
194         sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
195         sh_dmae_writel(sh_chan, chcr, CHCR);
196 }
197
198 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
199 {
200         /* CHCR cannot be changed while DMA is active. TODO: remove this superfluous check */
201         if (dmae_is_busy(sh_chan))
202                 return -EBUSY;
203
204         sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
205         sh_dmae_writel(sh_chan, val, CHCR);
206
207         return 0;
208 }
209
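/*
 * dmae_set_dmars() selects the slave request (MID/RID) for this channel.
 * Each 16-bit DMARS register carries the values for two channels, one per
 * byte; chan_pdata->dmars_bit (0 or 8) picks this channel's byte, and the
 * read-modify-write below preserves the neighbouring channel's byte.
 */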
210 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
211 {
212         struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
213                                                 struct sh_dmae_device, common);
214         struct sh_dmae_pdata *pdata = shdev->pdata;
215         const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
216         u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
217         int shift = chan_pdata->dmars_bit;
218
219         if (dmae_is_busy(sh_chan))
220                 return -EBUSY;
221
222         __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
223                      addr);
224
225         return 0;
226 }
227
228 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
229 {
230         struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
231         struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
232         dma_async_tx_callback callback = tx->callback;
233         dma_cookie_t cookie;
234
235         spin_lock_bh(&sh_chan->desc_lock);
236
237         cookie = sh_chan->common.cookie;
238         cookie++;
239         if (cookie < 0)
240                 cookie = 1;
241
242         sh_chan->common.cookie = cookie;
243         tx->cookie = cookie;
244
245         /* Mark all chunks of this descriptor as submitted, move to the queue */
246         list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
247                 /*
248                  * All chunks are on the global ld_free, so, we have to find
249                  * the end of the chain ourselves
250                  */
251                 if (chunk != desc && (chunk->mark == DESC_IDLE ||
252                                       chunk->async_tx.cookie > 0 ||
253                                       chunk->async_tx.cookie == -EBUSY ||
254                                       &chunk->node == &sh_chan->ld_free))
255                         break;
256                 chunk->mark = DESC_SUBMITTED;
257                 /* Callback goes to the last chunk */
258                 chunk->async_tx.callback = NULL;
259                 chunk->cookie = cookie;
260                 list_move_tail(&chunk->node, &sh_chan->ld_queue);
261                 last = chunk;
262         }
263
264         last->async_tx.callback = callback;
265         last->async_tx.callback_param = tx->callback_param;
266
267         dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
268                 tx->cookie, &last->async_tx, sh_chan->id,
269                 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
270
271         spin_unlock_bh(&sh_chan->desc_lock);
272
273         return cookie;
274 }
275
276 /* Called with desc_lock held */
277 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
278 {
279         struct sh_desc *desc;
280
281         list_for_each_entry(desc, &sh_chan->ld_free, node)
282                 if (desc->mark != DESC_PREPARED) {
283                         BUG_ON(desc->mark != DESC_IDLE);
284                         list_del(&desc->node);
285                         return desc;
286                 }
287
288         return NULL;
289 }
290
291 static const struct sh_dmae_slave_config *sh_dmae_find_slave(
292         struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
293 {
294         struct dma_device *dma_dev = sh_chan->common.device;
295         struct sh_dmae_device *shdev = container_of(dma_dev,
296                                         struct sh_dmae_device, common);
297         struct sh_dmae_pdata *pdata = shdev->pdata;
298         int i;
299
300         if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
301                 return NULL;
302
303         for (i = 0; i < pdata->slave_num; i++)
304                 if (pdata->slave[i].slave_id == param->slave_id)
305                         return pdata->slave + i;
306
307         return NULL;
308 }
309
310 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
311 {
312         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
313         struct sh_desc *desc;
314         struct sh_dmae_slave *param = chan->private;
315         int ret;
316
317         pm_runtime_get_sync(sh_chan->dev);
318
319         /*
320          * This relies on the guarantee from dmaengine that alloc_chan_resources
321          * never runs concurrently with itself or free_chan_resources.
322          */
323         if (param) {
324                 const struct sh_dmae_slave_config *cfg;
325
326                 cfg = sh_dmae_find_slave(sh_chan, param);
327                 if (!cfg) {
328                         ret = -EINVAL;
329                         goto efindslave;
330                 }
331
332                 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
333                         ret = -EBUSY;
334                         goto etestused;
335                 }
336
337                 param->config = cfg;
338
339                 dmae_set_dmars(sh_chan, cfg->mid_rid);
340                 dmae_set_chcr(sh_chan, cfg->chcr);
341         } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
342                 dmae_init(sh_chan);
343         }
344
345         spin_lock_bh(&sh_chan->desc_lock);
346         while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
347                 spin_unlock_bh(&sh_chan->desc_lock);
348                 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
349                 if (!desc) {
350                         spin_lock_bh(&sh_chan->desc_lock);
351                         break;
352                 }
353                 dma_async_tx_descriptor_init(&desc->async_tx,
354                                         &sh_chan->common);
355                 desc->async_tx.tx_submit = sh_dmae_tx_submit;
356                 desc->mark = DESC_IDLE;
357
358                 spin_lock_bh(&sh_chan->desc_lock);
359                 list_add(&desc->node, &sh_chan->ld_free);
360                 sh_chan->descs_allocated++;
361         }
362         spin_unlock_bh(&sh_chan->desc_lock);
363
364         if (!sh_chan->descs_allocated) {
365                 ret = -ENOMEM;
366                 goto edescalloc;
367         }
368
369         return sh_chan->descs_allocated;
370
371 edescalloc:
372         if (param)
373                 clear_bit(param->slave_id, sh_dmae_slave_used);
374 etestused:
375 efindslave:
376         pm_runtime_put(sh_chan->dev);
377         return ret;
378 }
379
380 /*
381  * sh_dma_free_chan_resources - Free all resources of the channel.
382  */
383 static void sh_dmae_free_chan_resources(struct dma_chan *chan)
384 {
385         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
386         struct sh_desc *desc, *_desc;
387         LIST_HEAD(list);
388         int descs = sh_chan->descs_allocated;
389
390         /* Protect against ISR */
391         spin_lock_irq(&sh_chan->desc_lock);
392         dmae_halt(sh_chan);
393         spin_unlock_irq(&sh_chan->desc_lock);
394
395         /* Now no new interrupts will occur */
396
397         /* Prepared and not submitted descriptors can still be on the queue */
398         if (!list_empty(&sh_chan->ld_queue))
399                 sh_dmae_chan_ld_cleanup(sh_chan, true);
400
401         if (chan->private) {
402                 /* The caller is holding dma_list_mutex */
403                 struct sh_dmae_slave *param = chan->private;
404                 clear_bit(param->slave_id, sh_dmae_slave_used);
405                 chan->private = NULL;
406         }
407
408         spin_lock_bh(&sh_chan->desc_lock);
409
410         list_splice_init(&sh_chan->ld_free, &list);
411         sh_chan->descs_allocated = 0;
412
413         spin_unlock_bh(&sh_chan->desc_lock);
414
415         if (descs > 0)
416                 pm_runtime_put(sh_chan->dev);
417
418         list_for_each_entry_safe(desc, _desc, &list, node)
419                 kfree(desc);
420 }
421
422 /**
423  * sh_dmae_add_desc - get, set up and return one transfer descriptor
424  * @sh_chan:    DMA channel
425  * @flags:      DMA transfer flags
426  * @dest:       destination DMA address, incremented when direction equals
427  *              DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
428  * @src:        source DMA address, incremented when direction equals
429  *              DMA_TO_DEVICE or DMA_BIDIRECTIONAL
430  * @len:        DMA transfer length
431  * @first:      if *first is NULL, it is set to this new descriptor, whose cookie is set to -EBUSY
432  * @direction:  needed for slave DMA to decide which address to keep constant,
433  *              equals DMA_BIDIRECTIONAL for MEMCPY
434  * Returns the new descriptor on success or NULL on failure
435  * Locks: called with desc_lock held
436  */
437 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
438         unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
439         struct sh_desc **first, enum dma_data_direction direction)
440 {
441         struct sh_desc *new;
442         size_t copy_size;
443
444         if (!*len)
445                 return NULL;
446
447         /* Allocate the link descriptor from the free list */
448         new = sh_dmae_get_desc(sh_chan);
449         if (!new) {
450                 dev_err(sh_chan->dev, "No free link descriptor available\n");
451                 return NULL;
452         }
453
454         copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
455
456         new->hw.sar = *src;
457         new->hw.dar = *dest;
458         new->hw.tcr = copy_size;
459
460         if (!*first) {
461                 /* First desc */
462                 new->async_tx.cookie = -EBUSY;
463                 *first = new;
464         } else {
465                 /* Other desc - invisible to the user */
466                 new->async_tx.cookie = -EINVAL;
467         }
468
469         dev_dbg(sh_chan->dev,
470                 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
471                 copy_size, *len, *src, *dest, &new->async_tx,
472                 new->async_tx.cookie, sh_chan->xmit_shift);
473
474         new->mark = DESC_PREPARED;
475         new->async_tx.flags = flags;
476         new->direction = direction;
477
478         *len -= copy_size;
479         if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
480                 *src += copy_size;
481         if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
482                 *dest += copy_size;
483
484         return new;
485 }
486
487 /*
488  * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
489  *
490  * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
491  * converted to scatter-gather to guarantee consistent locking and a correct
492  * list manipulation. For slave DMA, direction carries the usual meaning and,
493  * logically, the SG list lies in RAM while the addr variable holds the slave
494  * address, e.g., a FIFO I/O register. For MEMCPY, direction equals
495  * DMA_BIDIRECTIONAL and the single-element SG list points at the source buffer.
496  */
497 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
498         struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
499         enum dma_data_direction direction, unsigned long flags)
500 {
501         struct scatterlist *sg;
502         struct sh_desc *first = NULL, *new = NULL /* compiler... */;
503         LIST_HEAD(tx_list);
504         int chunks = 0;
505         int i;
506
507         if (!sg_len)
508                 return NULL;
509
510         for_each_sg(sgl, sg, sg_len, i)
511                 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
512                         (SH_DMA_TCR_MAX + 1);
513
514         /* Have to lock the whole loop to protect against concurrent release */
515         spin_lock_bh(&sh_chan->desc_lock);
516
517         /*
518          * Chaining:
519          * the first descriptor is what the user deals with in all API calls; its
520          *      cookie is initially set to -EBUSY and becomes a positive number
521          *      at tx_submit
522          * if more than one chunk is needed, further chunks have cookie = -EINVAL
523          * the last chunk, if not equal to the first, has cookie = -ENOSPC
524          * all chunks are linked onto the tx_list head with their .node heads
525          *      only during this function, then they are immediately spliced
526          *      back onto the free list as a chain
527          */
528         for_each_sg(sgl, sg, sg_len, i) {
529                 dma_addr_t sg_addr = sg_dma_address(sg);
530                 size_t len = sg_dma_len(sg);
531
532                 if (!len)
533                         goto err_get_desc;
534
535                 do {
536                         dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
537                                 i, sg, len, (unsigned long long)sg_addr);
538
539                         if (direction == DMA_FROM_DEVICE)
540                                 new = sh_dmae_add_desc(sh_chan, flags,
541                                                 &sg_addr, addr, &len, &first,
542                                                 direction);
543                         else
544                                 new = sh_dmae_add_desc(sh_chan, flags,
545                                                 addr, &sg_addr, &len, &first,
546                                                 direction);
547                         if (!new)
548                                 goto err_get_desc;
549
550                         new->chunks = chunks--;
551                         list_add_tail(&new->node, &tx_list);
552                 } while (len);
553         }
554
555         if (new != first)
556                 new->async_tx.cookie = -ENOSPC;
557
558         /* Put them back on the free list, so they don't get lost */
559         list_splice_tail(&tx_list, &sh_chan->ld_free);
560
561         spin_unlock_bh(&sh_chan->desc_lock);
562
563         return &first->async_tx;
564
565 err_get_desc:
566         list_for_each_entry(new, &tx_list, node)
567                 new->mark = DESC_IDLE;
568         list_splice(&tx_list, &sh_chan->ld_free);
569
570         spin_unlock_bh(&sh_chan->desc_lock);
571
572         return NULL;
573 }
574
575 static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
576         struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
577         size_t len, unsigned long flags)
578 {
579         struct sh_dmae_chan *sh_chan;
580         struct scatterlist sg;
581
582         if (!chan || !len)
583                 return NULL;
584
585         sh_chan = to_sh_chan(chan);
586
587         sg_init_table(&sg, 1);
588         sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
589                     offset_in_page(dma_src));
590         sg_dma_address(&sg) = dma_src;
591         sg_dma_len(&sg) = len;
592
593         return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
594                                flags);
595 }
596
597 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
598         struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
599         enum dma_data_direction direction, unsigned long flags)
600 {
601         struct sh_dmae_slave *param;
602         struct sh_dmae_chan *sh_chan;
603         dma_addr_t slave_addr;
604
605         if (!chan)
606                 return NULL;
607
608         sh_chan = to_sh_chan(chan);
609         param = chan->private;
610
611         /* Someone calling slave DMA on a public channel? */
612         if (!param || !sg_len) {
613                 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
614                          __func__, param, sg_len, param ? param->slave_id : -1);
615                 return NULL;
616         }
617
618         slave_addr = param->config->addr;
619
620         /*
621          * if (param != NULL), this is a successfully requested slave channel,
622          * therefore param->config != NULL too.
623          */
624         return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
625                                direction, flags);
626 }
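/*
 * Illustration only, not part of this driver: a minimal sketch of how a
 * dmaengine client of this era would use the slave API implemented above.
 * The slave ID (SHDMA_SLAVE_SCIF0_RX), the function names and the
 * scatterlist are hypothetical and SoC/driver specific.
 */
#if 0
static bool example_filter(struct dma_chan *chan, void *arg)
{
	/* Bind the requested channel to our slave configuration */
	chan->private = arg;
	return true;
}

static int example_start_rx(struct scatterlist *sgl, unsigned int sg_len,
			    dma_async_tx_callback done, void *done_arg)
{
	static struct sh_dmae_slave slave = { .slave_id = SHDMA_SLAVE_SCIF0_RX };
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter binds the channel to our sh_dmae_slave via chan->private */
	chan = dma_request_channel(mask, example_filter, &slave);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
					DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->callback = done;
	tx->callback_param = done_arg;
	tx->tx_submit(tx);

	/* Nothing is transferred until issue_pending kicks the queue */
	chan->device->device_issue_pending(chan);

	/*
	 * A transfer in flight can later be aborted with
	 * chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	 */
	return 0;
}
#endif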
627
628 static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
629                            unsigned long arg)
630 {
631         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
632
633         /* Only supports DMA_TERMINATE_ALL */
634         if (cmd != DMA_TERMINATE_ALL)
635                 return -ENXIO;
636
637         if (!chan)
638                 return -EINVAL;
639
640         spin_lock_bh(&sh_chan->desc_lock);
641         dmae_halt(sh_chan);
642
643         if (!list_empty(&sh_chan->ld_queue)) {
644                 /* Record partial transfer */
645                 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
646                                                   struct sh_desc, node);
647                 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
648                         sh_chan->xmit_shift;
649
650         }
651         spin_unlock_bh(&sh_chan->desc_lock);
652
653         sh_dmae_chan_ld_cleanup(sh_chan, true);
654
655         return 0;
656 }
657
658 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
659 {
660         struct sh_desc *desc, *_desc;
661         /* Is the "exposed" head of a chain acked? */
662         bool head_acked = false;
663         dma_cookie_t cookie = 0;
664         dma_async_tx_callback callback = NULL;
665         void *param = NULL;
666
667         spin_lock_bh(&sh_chan->desc_lock);
668         list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
669                 struct dma_async_tx_descriptor *tx = &desc->async_tx;
670
671                 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
672                 BUG_ON(desc->mark != DESC_SUBMITTED &&
673                        desc->mark != DESC_COMPLETED &&
674                        desc->mark != DESC_WAITING);
675
676                 /*
677                  * queue is ordered, and we use this loop to (1) clean up all
678                  * completed descriptors, and to (2) update descriptor flags of
679                  * any chunks in a (partially) completed chain
680                  */
681                 if (!all && desc->mark == DESC_SUBMITTED &&
682                     desc->cookie != cookie)
683                         break;
684
685                 if (tx->cookie > 0)
686                         cookie = tx->cookie;
687
688                 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
689                         if (sh_chan->completed_cookie != desc->cookie - 1)
690                                 dev_dbg(sh_chan->dev,
691                                         "Completing cookie %d, expected %d\n",
692                                         desc->cookie,
693                                         sh_chan->completed_cookie + 1);
694                         sh_chan->completed_cookie = desc->cookie;
695                 }
696
697                 /* Call callback on the last chunk */
698                 if (desc->mark == DESC_COMPLETED && tx->callback) {
699                         desc->mark = DESC_WAITING;
700                         callback = tx->callback;
701                         param = tx->callback_param;
702                         dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
703                                 tx->cookie, tx, sh_chan->id);
704                         BUG_ON(desc->chunks != 1);
705                         break;
706                 }
707
708                 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
709                         if (desc->mark == DESC_COMPLETED) {
710                                 BUG_ON(tx->cookie < 0);
711                                 desc->mark = DESC_WAITING;
712                         }
713                         head_acked = async_tx_test_ack(tx);
714                 } else {
715                         switch (desc->mark) {
716                         case DESC_COMPLETED:
717                                 desc->mark = DESC_WAITING;
718                                 /* Fall through */
719                         case DESC_WAITING:
720                                 if (head_acked)
721                                         async_tx_ack(&desc->async_tx);
722                         }
723                 }
724
725                 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
726                         tx, tx->cookie);
727
728                 if (((desc->mark == DESC_COMPLETED ||
729                       desc->mark == DESC_WAITING) &&
730                      async_tx_test_ack(&desc->async_tx)) || all) {
731                         /* Remove from ld_queue list */
732                         desc->mark = DESC_IDLE;
733                         list_move(&desc->node, &sh_chan->ld_free);
734                 }
735         }
736
737         if (all && !callback)
738                 /*
739                  * Terminating and the loop completed normally: forgive
740                  * uncompleted cookies
741                  */
742                 sh_chan->completed_cookie = sh_chan->common.cookie;
743
744         spin_unlock_bh(&sh_chan->desc_lock);
745
746         if (callback)
747                 callback(param);
748
749         return callback;
750 }
751
752 /*
753  * sh_dmae_chan_ld_cleanup - Clean up link descriptors
754  *
755  * This function cleans up the ld_queue of a DMA channel.
756  */
757 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
758 {
759         while (__ld_cleanup(sh_chan, all))
760                 ;
761 }
762
763 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
764 {
765         struct sh_desc *desc;
766
767         spin_lock_bh(&sh_chan->desc_lock);
768         /* DMA work check */
769         if (dmae_is_busy(sh_chan)) {
770                 spin_unlock_bh(&sh_chan->desc_lock);
771                 return;
772         }
773
774         /* Find the first not transferred descriptor */
775         list_for_each_entry(desc, &sh_chan->ld_queue, node)
776                 if (desc->mark == DESC_SUBMITTED) {
777                         dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
778                                 desc->async_tx.cookie, sh_chan->id,
779                                 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
780                         /* Get the ld start address from ld_queue */
781                         dmae_set_reg(sh_chan, &desc->hw);
782                         dmae_start(sh_chan);
783                         break;
784                 }
785
786         spin_unlock_bh(&sh_chan->desc_lock);
787 }
788
789 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
790 {
791         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
792         sh_chan_xfer_ld_queue(sh_chan);
793 }
794
795 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
796                                         dma_cookie_t cookie,
797                                         struct dma_tx_state *txstate)
798 {
799         struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
800         dma_cookie_t last_used;
801         dma_cookie_t last_complete;
802         enum dma_status status;
803
804         sh_dmae_chan_ld_cleanup(sh_chan, false);
805
806         /* First read completed cookie to avoid a skew */
807         last_complete = sh_chan->completed_cookie;
808         rmb();
809         last_used = chan->cookie;
810         BUG_ON(last_complete < 0);
811         dma_set_tx_state(txstate, last_complete, last_used, 0);
812
813         spin_lock_bh(&sh_chan->desc_lock);
814
815         status = dma_async_is_complete(cookie, last_complete, last_used);
816
817         /*
818          * If we don't find the cookie on the queue, it has been aborted and we
819          * have to report an error
820          */
821         if (status != DMA_SUCCESS) {
822                 struct sh_desc *desc;
823                 status = DMA_ERROR;
824                 list_for_each_entry(desc, &sh_chan->ld_queue, node)
825                         if (desc->cookie == cookie) {
826                                 status = DMA_IN_PROGRESS;
827                                 break;
828                         }
829         }
830
831         spin_unlock_bh(&sh_chan->desc_lock);
832
833         return status;
834 }
835
836 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
837 {
838         irqreturn_t ret = IRQ_NONE;
839         struct sh_dmae_chan *sh_chan = data;
840         u32 chcr;
841
842         spin_lock(&sh_chan->desc_lock);
843
844         chcr = sh_dmae_readl(sh_chan, CHCR);
845
846         if (chcr & CHCR_TE) {
847                 /* DMA stop */
848                 dmae_halt(sh_chan);
849
850                 ret = IRQ_HANDLED;
851                 tasklet_schedule(&sh_chan->tasklet);
852         }
853
854         spin_unlock(&sh_chan->desc_lock);
855
856         return ret;
857 }
858
859 /* Called from error IRQ or NMI */
860 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
861 {
862         unsigned int handled = 0;
863         int i;
864
865         /* halt the dma controller */
866         sh_dmae_ctl_stop(shdev);
867
868         /* We cannot detect which channel caused the error, so we have to reset them all */
869         for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
870                 struct sh_dmae_chan *sh_chan = shdev->chan[i];
871                 struct sh_desc *desc;
872                 LIST_HEAD(dl);
873
874                 if (!sh_chan)
875                         continue;
876
877                 spin_lock(&sh_chan->desc_lock);
878
879                 /* Stop the channel */
880                 dmae_halt(sh_chan);
881
882                 list_splice_init(&sh_chan->ld_queue, &dl);
883
884                 spin_unlock(&sh_chan->desc_lock);
885
886                 /* Complete all descriptors */
887                 list_for_each_entry(desc, &dl, node) {
888                         struct dma_async_tx_descriptor *tx = &desc->async_tx;
889                         desc->mark = DESC_IDLE;
890                         if (tx->callback)
891                                 tx->callback(tx->callback_param);
892                 }
893
894                 spin_lock(&sh_chan->desc_lock);
895                 list_splice(&dl, &sh_chan->ld_free);
896                 spin_unlock(&sh_chan->desc_lock);
897
898                 handled++;
899         }
900
901         sh_dmae_rst(shdev);
902
903         return !!handled;
904 }
905
906 static irqreturn_t sh_dmae_err(int irq, void *data)
907 {
908         struct sh_dmae_device *shdev = data;
909
910         if (!(dmaor_read(shdev) & DMAOR_AE))
911                 return IRQ_NONE;
912
913         sh_dmae_reset(data);
914         return IRQ_HANDLED;
915 }
916
917 static void dmae_do_tasklet(unsigned long data)
918 {
919         struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
920         struct sh_desc *desc;
921         u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
922         u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
923
924         spin_lock(&sh_chan->desc_lock);
925         list_for_each_entry(desc, &sh_chan->ld_queue, node) {
926                 if (desc->mark == DESC_SUBMITTED &&
927                     ((desc->direction == DMA_FROM_DEVICE &&
928                       (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
929                      (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
930                         dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
931                                 desc->async_tx.cookie, &desc->async_tx,
932                                 desc->hw.dar);
933                         desc->mark = DESC_COMPLETED;
934                         break;
935                 }
936         }
937         spin_unlock(&sh_chan->desc_lock);
938
939         /* Next desc */
940         sh_chan_xfer_ld_queue(sh_chan);
941         sh_dmae_chan_ld_cleanup(sh_chan, false);
942 }
943
944 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
945 {
946         /* Fast path out if NMIF is not asserted for this controller */
947         if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
948                 return false;
949
950         return sh_dmae_reset(shdev);
951 }
952
953 static int sh_dmae_nmi_handler(struct notifier_block *self,
954                                unsigned long cmd, void *data)
955 {
956         struct sh_dmae_device *shdev;
957         int ret = NOTIFY_DONE;
958         bool triggered;
959
960         /*
961          * Only concern ourselves with NMI events.
962          *
963          * Normally we would check the die chain value, but as this needs
964          * to be architecture independent, check for NMI context instead.
965          */
966         if (!in_nmi())
967                 return NOTIFY_DONE;
968
969         rcu_read_lock();
970         list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
971                 /*
972                  * Only stop if one of the controllers has NMIF asserted,
973                  * we do not want to interfere with regular address error
974                  * handling or NMI events that don't concern the DMACs.
975                  */
976                 triggered = sh_dmae_nmi_notify(shdev);
977                 if (triggered == true)
978                         ret = NOTIFY_OK;
979         }
980         rcu_read_unlock();
981
982         return ret;
983 }
984
985 static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
986         .notifier_call  = sh_dmae_nmi_handler,
987
988         /* Run before NMI debug handler and KGDB */
989         .priority       = 1,
990 };
991
992 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
993                                         int irq, unsigned long flags)
994 {
995         int err;
996         const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
997         struct platform_device *pdev = to_platform_device(shdev->common.dev);
998         struct sh_dmae_chan *new_sh_chan;
999
1000         /* alloc channel */
1001         new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
1002         if (!new_sh_chan) {
1003                 dev_err(shdev->common.dev,
1004                         "No free memory for allocating dma channels!\n");
1005                 return -ENOMEM;
1006         }
1007
1008         /* point the channel at the shared struct dma_device */
1009         new_sh_chan->common.device = &shdev->common;
1010
1011         new_sh_chan->dev = shdev->common.dev;
1012         new_sh_chan->id = id;
1013         new_sh_chan->irq = irq;
1014         new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
1015
1016         /* Init DMA tasklet */
1017         tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
1018                         (unsigned long)new_sh_chan);
1019
1020         spin_lock_init(&new_sh_chan->desc_lock);
1021
1022         /* Init descriptor management lists */
1023         INIT_LIST_HEAD(&new_sh_chan->ld_queue);
1024         INIT_LIST_HEAD(&new_sh_chan->ld_free);
1025
1026         /* Add the channel to DMA device channel list */
1027         list_add_tail(&new_sh_chan->common.device_node,
1028                         &shdev->common.channels);
1029         shdev->common.chancnt++;
1030
1031         if (pdev->id >= 0)
1032                 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1033                          "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
1034         else
1035                 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1036                          "sh-dma%d", new_sh_chan->id);
1037
1038         /* set up channel irq */
1039         err = request_irq(irq, &sh_dmae_interrupt, flags,
1040                           new_sh_chan->dev_id, new_sh_chan);
1041         if (err) {
1042                 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
1043                         "with return %d\n", id, err);
1044                 goto err_no_irq;
1045         }
1046
1047         shdev->chan[id] = new_sh_chan;
1048         return 0;
1049
1050 err_no_irq:
1051         /* remove from dmaengine device node */
1052         list_del(&new_sh_chan->common.device_node);
1053         kfree(new_sh_chan);
1054         return err;
1055 }
1056
1057 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
1058 {
1059         int i;
1060
1061         for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
1062                 if (shdev->chan[i]) {
1063                         struct sh_dmae_chan *sh_chan = shdev->chan[i];
1064
1065                         free_irq(sh_chan->irq, sh_chan);
1066
1067                         list_del(&sh_chan->common.device_node);
1068                         kfree(sh_chan);
1069                         shdev->chan[i] = NULL;
1070                 }
1071         }
1072         shdev->common.chancnt = 0;
1073 }
1074
1075 static int __init sh_dmae_probe(struct platform_device *pdev)
1076 {
1077         struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1078         unsigned long irqflags = IRQF_DISABLED,
1079                 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1080         unsigned long flags;
1081         int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
1082         int err, i, irq_cnt = 0, irqres = 0;
1083         struct sh_dmae_device *shdev;
1084         struct resource *chan, *dmars, *errirq_res, *chanirq_res;
1085
1086         /* get platform data */
1087         if (!pdata || !pdata->channel_num)
1088                 return -ENODEV;
1089
1090         chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1091         /* DMARS area is optional; if absent, this controller cannot do slave DMA */
1092         dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1093         /*
1094          * IRQ resources:
1095  * 1. there must always be at least one IRQ IO-resource. On SH4 it is
1096          *    the error IRQ, in which case it is the only IRQ in this resource:
1097          *    start == end. If it is the only IRQ resource, all channels also
1098          *    use the same IRQ.
1099          * 2. DMA channel IRQ resources can be specified one per resource or in
1100          *    ranges (start != end)
1101          * 3. iff all events (channels and, optionally, error) on this
1102          *    controller use the same IRQ, only one IRQ resource can be
1103          *    specified, otherwise there must be one IRQ per channel, even if
1104          *    some of them are equal
1105          * 4. if all IRQs on this controller are equal or if some specific IRQs
1106          *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1107          *    requested with the IRQF_SHARED flag
1108          */
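        /*
         * Illustration (made-up platform code, not from this driver): a
         * controller with six channels, a dedicated error IRQ and one IRQ per
         * pair of channels could describe rule 2 above with IRQ resources like
         *
         *      { .start = 34, .end = 34, .flags = IORESOURCE_IRQ },  (error)
         *      { .start = 46, .end = 47, .flags = IORESOURCE_IRQ },  (ch 0-1)
         *      { .start = 92, .end = 93, .flags = IORESOURCE_IRQ },  (ch 2-3)
         *      { .start = 94, .end = 95, .flags = IORESOURCE_IRQ },  (ch 4-5)
         *
         * while a fully multiplexed controller (rule 3) would list a single
         * IRQ resource with start == end, optionally marked
         * IORESOURCE_IRQ_SHAREABLE so that it is requested with IRQF_SHARED.
         */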
1109         errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1110         if (!chan || !errirq_res)
1111                 return -ENODEV;
1112
1113         if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1114                 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1115                 return -EBUSY;
1116         }
1117
1118         if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1119                 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1120                 err = -EBUSY;
1121                 goto ermrdmars;
1122         }
1123
1124         err = -ENOMEM;
1125         shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1126         if (!shdev) {
1127                 dev_err(&pdev->dev, "Not enough memory\n");
1128                 goto ealloc;
1129         }
1130
1131         shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1132         if (!shdev->chan_reg)
1133                 goto emapchan;
1134         if (dmars) {
1135                 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1136                 if (!shdev->dmars)
1137                         goto emapdmars;
1138         }
1139
1140         /* platform data */
1141         shdev->pdata = pdata;
1142
1143         pm_runtime_enable(&pdev->dev);
1144         pm_runtime_get_sync(&pdev->dev);
1145
1146         spin_lock_irqsave(&sh_dmae_lock, flags);
1147         list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
1148         spin_unlock_irqrestore(&sh_dmae_lock, flags);
1149
1150         /* reset dma controller - only needed as a test */
1151         err = sh_dmae_rst(shdev);
1152         if (err)
1153                 goto rst_err;
1154
1155         INIT_LIST_HEAD(&shdev->common.channels);
1156
1157         dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1158         if (dmars)
1159                 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1160
1161         shdev->common.device_alloc_chan_resources
1162                 = sh_dmae_alloc_chan_resources;
1163         shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1164         shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
1165         shdev->common.device_tx_status = sh_dmae_tx_status;
1166         shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
1167
1168         /* Compulsory fields for DMA_SLAVE */
1169         shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
1170         shdev->common.device_control = sh_dmae_control;
1171
1172         shdev->common.dev = &pdev->dev;
1173         /* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
1174         shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
1175
1176 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1177         chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1178
1179         if (!chanirq_res)
1180                 chanirq_res = errirq_res;
1181         else
1182                 irqres++;
1183
1184         if (chanirq_res == errirq_res ||
1185             (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
1186                 irqflags = IRQF_SHARED;
1187
1188         errirq = errirq_res->start;
1189
1190         err = request_irq(errirq, sh_dmae_err, irqflags,
1191                           "DMAC Address Error", shdev);
1192         if (err) {
1193                 dev_err(&pdev->dev,
1194                         "DMA failed requesting irq #%d, error %d\n",
1195                         errirq, err);
1196                 goto eirq_err;
1197         }
1198
1199 #else
1200         chanirq_res = errirq_res;
1201 #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
1202
1203         if (chanirq_res->start == chanirq_res->end &&
1204             !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1205                 /* Special case - all multiplexed */
1206                 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1207                         chan_irq[irq_cnt] = chanirq_res->start;
1208                         chan_flag[irq_cnt] = IRQF_SHARED;
1209                 }
1210         } else {
1211                 do {
1212                         for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1213                                 if ((errirq_res->flags & IORESOURCE_BITS) ==
1214                                     IORESOURCE_IRQ_SHAREABLE)
1215                                         chan_flag[irq_cnt] = IRQF_SHARED;
1216                                 else
1217                                         chan_flag[irq_cnt] = IRQF_DISABLED;
1218                                 dev_dbg(&pdev->dev,
1219                                         "Found IRQ %d for channel %d\n",
1220                                         i, irq_cnt);
1221                                 chan_irq[irq_cnt++] = i;
1222                         }
1223                         chanirq_res = platform_get_resource(pdev,
1224                                                 IORESOURCE_IRQ, ++irqres);
1225                 } while (irq_cnt < pdata->channel_num && chanirq_res);
1226         }
1227
1228         if (irq_cnt < pdata->channel_num)
1229                 goto eirqres;
1230
1231         /* Create DMA Channel */
1232         for (i = 0; i < pdata->channel_num; i++) {
1233                 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1234                 if (err)
1235                         goto chan_probe_err;
1236         }
1237
1238         pm_runtime_put(&pdev->dev);
1239
1240         platform_set_drvdata(pdev, shdev);
1241         dma_async_device_register(&shdev->common);
1242
1243         return err;
1244
1245 chan_probe_err:
1246         sh_dmae_chan_remove(shdev);
1247 eirqres:
1248 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1249         free_irq(errirq, shdev);
1250 eirq_err:
1251 #endif
1252 rst_err:
1253         spin_lock_irqsave(&sh_dmae_lock, flags);
1254         list_del_rcu(&shdev->node);
1255         spin_unlock_irqrestore(&sh_dmae_lock, flags);
1256
1257         pm_runtime_put(&pdev->dev);
1258         pm_runtime_disable(&pdev->dev);
1259
1260         if (dmars)
1261                 iounmap(shdev->dmars);
1262 emapdmars:
1263         iounmap(shdev->chan_reg);
1264 emapchan:
1265         kfree(shdev);
1266 ealloc:
1267         if (dmars)
1268                 release_mem_region(dmars->start, resource_size(dmars));
1269 ermrdmars:
1270         release_mem_region(chan->start, resource_size(chan));
1271
1272         return err;
1273 }
1274
1275 static int __exit sh_dmae_remove(struct platform_device *pdev)
1276 {
1277         struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1278         struct resource *res;
1279         unsigned long flags;
1280         int errirq = platform_get_irq(pdev, 0);
1281
1282         dma_async_device_unregister(&shdev->common);
1283
1284         if (errirq > 0)
1285                 free_irq(errirq, shdev);
1286
1287         spin_lock_irqsave(&sh_dmae_lock, flags);
1288         list_del_rcu(&shdev->node);
1289         spin_unlock_irqrestore(&sh_dmae_lock, flags);
1290
1291         /* remove channel data */
1292         sh_dmae_chan_remove(shdev);
1293
1294         pm_runtime_disable(&pdev->dev);
1295
1296         if (shdev->dmars)
1297                 iounmap(shdev->dmars);
1298         iounmap(shdev->chan_reg);
1299
1300         kfree(shdev);
1301
1302         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1303         if (res)
1304                 release_mem_region(res->start, resource_size(res));
1305         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1306         if (res)
1307                 release_mem_region(res->start, resource_size(res));
1308
1309         return 0;
1310 }
1311
1312 static void sh_dmae_shutdown(struct platform_device *pdev)
1313 {
1314         struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1315         sh_dmae_ctl_stop(shdev);
1316 }
1317
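/*
 * Runtime PM: the runtime_suspend callback below deliberately does nothing;
 * stopping the module clock is assumed to be handled by the platform's
 * runtime PM implementation. On runtime resume the controller may have lost
 * its register state, so DMAOR is re-initialized via sh_dmae_rst().
 */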
1318 static int sh_dmae_runtime_suspend(struct device *dev)
1319 {
1320         return 0;
1321 }
1322
1323 static int sh_dmae_runtime_resume(struct device *dev)
1324 {
1325         struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1326
1327         return sh_dmae_rst(shdev);
1328 }
1329
1330 #ifdef CONFIG_PM
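/*
 * System PM: a channel holds a runtime PM reference for as long as it has
 * descriptors allocated (taken in sh_dmae_alloc_chan_resources()). Suspend
 * drops that reference and records the result in pm_error; resume re-takes
 * the reference only if the put succeeded, and then restores the channel
 * configuration (DMARS/CHCR), since the controller may have been powered
 * down in between.
 */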
1331 static int sh_dmae_suspend(struct device *dev)
1332 {
1333         struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1334         int i;
1335
1336         for (i = 0; i < shdev->pdata->channel_num; i++) {
1337                 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1338                 if (sh_chan->descs_allocated)
1339                         sh_chan->pm_error = pm_runtime_put_sync(dev);
1340         }
1341
1342         return 0;
1343 }
1344
1345 static int sh_dmae_resume(struct device *dev)
1346 {
1347         struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1348         int i;
1349
1350         for (i = 0; i < shdev->pdata->channel_num; i++) {
1351                 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1352                 struct sh_dmae_slave *param = sh_chan->common.private;
1353
1354                 if (!sh_chan->descs_allocated)
1355                         continue;
1356
1357                 if (!sh_chan->pm_error)
1358                         pm_runtime_get_sync(dev);
1359
1360                 if (param) {
1361                         const struct sh_dmae_slave_config *cfg = param->config;
1362                         dmae_set_dmars(sh_chan, cfg->mid_rid);
1363                         dmae_set_chcr(sh_chan, cfg->chcr);
1364                 } else {
1365                         dmae_init(sh_chan);
1366                 }
1367         }
1368
1369         return 0;
1370 }
1371 #else
1372 #define sh_dmae_suspend NULL
1373 #define sh_dmae_resume NULL
1374 #endif
1375
1376 const struct dev_pm_ops sh_dmae_pm = {
1377         .suspend                = sh_dmae_suspend,
1378         .resume                 = sh_dmae_resume,
1379         .runtime_suspend        = sh_dmae_runtime_suspend,
1380         .runtime_resume         = sh_dmae_runtime_resume,
1381 };
1382
1383 static struct platform_driver sh_dmae_driver = {
1384         .remove         = __exit_p(sh_dmae_remove),
1385         .shutdown       = sh_dmae_shutdown,
1386         .driver = {
1387                 .owner  = THIS_MODULE,
1388                 .name   = "sh-dma-engine",
1389                 .pm     = &sh_dmae_pm,
1390         },
1391 };
1392
1393 static int __init sh_dmae_init(void)
1394 {
1395         /* Wire up NMI handling */
1396         int err = register_die_notifier(&sh_dmae_nmi_notifier);
1397         if (err)
1398                 return err;
1399
1400         return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1401 }
1402 module_init(sh_dmae_init);
1403
1404 static void __exit sh_dmae_exit(void)
1405 {
1406         platform_driver_unregister(&sh_dmae_driver);
1407
1408         unregister_die_notifier(&sh_dmae_nmi_notifier);
1409 }
1410 module_exit(sh_dmae_exit);
1411
1412 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1413 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1414 MODULE_LICENSE("GPL");
1415 MODULE_ALIAS("platform:sh-dma-engine");