/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */
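
/* Clients drive this engine through the standard dmaengine slave API
 * (usage sketch, not part of this driver): request a channel with
 * dma_request_channel() and a filter matching this device, build a
 * transfer with device_prep_slave_sg(), submit it with tx_submit() and
 * kick it off with dma_async_issue_pending().
 */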

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8
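
/* Layout of one 8-byte descriptor, as assembled by td_fill_desc() and
 * decoded again by __td_unmap_desc():
 * byte 0:    control; 0x21 = transfer + valid, plus 0x02 on the last
 *            descriptor of a chain
 * byte 1:    reserved, written as zero
 * bytes 2-3: transfer length, little endian
 * bytes 4-7: DMA address, little endian
 */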

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_data_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

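/* The channels[] array lives directly after struct timb_dma in the same
 * allocation (see td_probe()), so the containing struct timb_dma can be
 * recovered by stepping back over the preceding channel instances and
 * the struct itself.
 */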
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_FROM_DEVICE) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_FROM_DEVICE)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

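/* Build the IER mask: one bit per channel that has an ongoing transfer
 * whose current descriptor asked for an interrupt.
 */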
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

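/* dmaengine tx_submit hook: assign the cookie, then either start the
 * transfer right away if the channel is idle, or queue it to be started
 * from the tasklet once the current transfer finishes.
 */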
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

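/* Take a descriptor off the free list; a descriptor may only be reused
 * once the client has ACKed it.
 */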
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	chan->completed_cookie = 1;
	chan->cookie = 1;
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			/* put the descriptor back so it is not leaked */
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		      unsigned long arg)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

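/* Deferred interrupt work: ack and finish the transfers that completed,
 * start the next queued transfer on each such channel, then restore the
 * IER mask that td_irq() cleared.
 */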
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

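/* Configuration comes from struct timb_dma_platform_data (defined in
 * <linux/timb_dma.h>): the number of channels, and per channel whether
 * it is RX, how many descriptors to allocate, how many elements each
 * descriptor holds and, for video RX channels, the bytes per line.
 */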
static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources	= td_alloc_chan_resources;
	td->dma.device_free_chan_resources	= td_free_chan_resources;
	td->dma.device_tx_status		= td_tx_status;
	td->dma.device_issue_pending		= td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_control = td_control;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			/* the IRQ is already requested, so release it too */
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		td_chan->chan.cookie = 1;
		td_chan->chan.chan_id = i;
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= __devexit_p(td_remove),
};

static int __init td_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_init);

static void __exit td_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);