DMAENGINE: ste_dma40: lock fix
1 /*
2  * drivers/dma/ste_dma40.c
3  *
4  * Copyright (C) ST-Ericsson 2007-2010
5  * License terms: GNU General Public License (GPL) version 2
6  * Author: Per Friden <per.friden@stericsson.com>
7  * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8  *
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17
18 #include <plat/ste_dma40.h>
19
20 #include "ste_dma40_ll.h"
21
22 #define D40_NAME "dma40"
23
24 #define D40_PHY_CHAN -1
25
26 /* For masking out/in 2 bit channel positions */
27 #define D40_CHAN_POS(chan)  (2 * (chan / 2))
28 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
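/*
 * Even-numbered physical channels are controlled through the ACTIVE register
 * and odd-numbered ones through ACTIVO, so a channel pair (2n, 2n + 1) shares
 * the same 2-bit position, e.g. D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4.
 */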
29
30 /* Maximum iterations taken before giving up suspending a channel */
31 #define D40_SUSPEND_MAX_IT 500
32
33 #define D40_ALLOC_FREE          (1 << 31)
34 #define D40_ALLOC_PHY           (1 << 30)
35 #define D40_ALLOC_LOG_FREE      0
36
37 /* Hardware designer of the block */
38 #define D40_PERIPHID2_DESIGNER 0x8
39
40 /**
41  * enum d40_command - The different commands and/or statuses.
42  *
43  * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
44  * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
45  * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
46  * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
47  */
48 enum d40_command {
49         D40_DMA_STOP            = 0,
50         D40_DMA_RUN             = 1,
51         D40_DMA_SUSPEND_REQ     = 2,
52         D40_DMA_SUSPENDED       = 3
53 };
54
55 /**
56  * struct d40_lli_pool - Structure for keeping LLIs in memory
57  *
58  * @base: Pointer to a memory area used when the pre_alloc_lli's are not
59  * large enough, i.e. bigger than the most common case of 1 dst and 1 src.
60  * NULL if pre_alloc_lli is used.
61  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
62  * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
63  * one buffer to one buffer.
64  */
65 struct d40_lli_pool {
66         void    *base;
67         int     size;
68         /* Space for dst and src, plus an extra for padding */
69         u8      pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70 };
71
72 /**
73  * struct d40_desc - A descriptor is one DMA job.
74  *
75  * @lli_phy: LLI settings for physical channel. Both src and dst
76  * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
77  * lli_len equals one.
78  * @lli_log: Same as above but for logical channels.
79  * @lli_pool: The pool with two entries pre-allocated.
80  * @lli_len: Number of llis of current descriptor.
81  * @lli_count: Number of transferred llis.
82  * @lli_tx_len: Max number of LLIs per transfer; there can be
83  * many transfers for one descriptor.
84  * @txd: DMA engine struct. Used, among other things, for communication
85  * during a transfer.
86  * @node: List entry.
87  * @dir: The transfer direction of this job.
88  * @is_in_client_list: true if the client owns this descriptor.
89  *
90  * This descriptor is used for both logical and physical transfers.
91  */
92
93 struct d40_desc {
94         /* LLI physical */
95         struct d40_phy_lli_bidir         lli_phy;
96         /* LLI logical */
97         struct d40_log_lli_bidir         lli_log;
98
99         struct d40_lli_pool              lli_pool;
100         int                              lli_len;
101         int                              lli_count;
102         u32                              lli_tx_len;
103
104         struct dma_async_tx_descriptor   txd;
105         struct list_head                 node;
106
107         enum dma_data_direction          dir;
108         bool                             is_in_client_list;
109 };
110
111 /**
112  * struct d40_lcla_pool - LCLA pool settings and data.
113  *
114  * @base: The virtual address of LCLA.
115  * @phy: Physical base address of LCLA.
116  * @base_size: Size of the LCLA area.
117  * @lock: Lock to protect the content in this struct.
118  * @alloc_map: Mapping between physical channel and LCLA entries.
119  * @num_blocks: The number of entries in alloc_map. Equal to the
120  * number of physical channels.
121  */
122 struct d40_lcla_pool {
123         void            *base;
124         dma_addr_t       phy;
125         resource_size_t  base_size;
126         spinlock_t       lock;
127         u32             *alloc_map;
128         int              num_blocks;
129 };
130
131 /**
132  * struct d40_phy_res - struct for handling eventlines mapped to physical
133  * channels.
134  *
135  * @lock: A lock protecting this entity.
136  * @num: The physical channel number of this entity.
137  * @allocated_src: Bit map showing which src event lines are mapped to
138  * this physical channel. Can also be free or physically allocated.
139  * @allocated_dst: Same as for src but for dst.
140  * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
141  * event line numbers. allocated_src and allocated_dst can not both be
142  * allocated to a physical channel, since the interrupt handler then has
143  * no way of figuring out which one the interrupt belongs to.
144  */
145 struct d40_phy_res {
146         spinlock_t lock;
147         int        num;
148         u32        allocated_src;
149         u32        allocated_dst;
150 };
151
152 struct d40_base;
153
154 /**
155  * struct d40_chan - Struct that describes a channel.
156  *
157  * @lock: A spinlock to protect this struct.
158  * @log_num: The logical channel number, if any, of this channel.
159  * @completed: Starts with 1; after the first interrupt it is set to the
160  * dma engine's current cookie.
161  * @pending_tx: The number of pending transfers. Used between interrupt handler
162  * and tasklet.
163  * @busy: Set to true when transfer is ongoing on this channel.
164  * @phy_chan: Pointer to the physical channel which this instance runs on.
165  * If this pointer is NULL, the channel is not allocated.
166  * @chan: DMA engine handle.
167  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
168  * transfer and call client callback.
169  * @client: Client owned descriptor list.
170  * @active: Active descriptor.
171  * @queue: Queued jobs.
172  * @dma_cfg: The client configuration of this dma channel.
173  * @base: Pointer to the device instance struct.
174  * @src_def_cfg: Default cfg register setting for src.
175  * @dst_def_cfg: Default cfg register setting for dst.
176  * @log_def: Default logical channel settings.
177  * @lcla: Space for one dst/src pair for logical channel transfers.
178  * @lcpa: Pointer to dst and src lcpa settings.
179  *
180  * This struct can either "be" a logical or a physical channel.
181  */
182 struct d40_chan {
183         spinlock_t                       lock;
184         int                              log_num;
185         /* ID of the most recent completed transfer */
186         int                              completed;
187         int                              pending_tx;
188         bool                             busy;
189         struct d40_phy_res              *phy_chan;
190         struct dma_chan                  chan;
191         struct tasklet_struct            tasklet;
192         struct list_head                 client;
193         struct list_head                 active;
194         struct list_head                 queue;
195         struct stedma40_chan_cfg         dma_cfg;
196         struct d40_base                 *base;
197         /* Default register configurations */
198         u32                              src_def_cfg;
199         u32                              dst_def_cfg;
200         struct d40_def_lcsp              log_def;
201         struct d40_lcla_elem             lcla;
202         struct d40_log_lli_full         *lcpa;
203 };
204
205 /**
206  * struct d40_base - The big global struct, one for each probe'd instance.
207  *
208  * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
209  * @execmd_lock: Lock for execute command usage since several channels share
210  * the same physical register.
211  * @dev: The device structure.
212  * @virtbase: The virtual base address of the DMA's registers.
213  * @clk: Pointer to the DMA clock structure.
214  * @phy_start: Physical memory start of the DMA registers.
215  * @phy_size: Size of the DMA register map.
216  * @irq: The IRQ number.
217  * @num_phy_chans: The number of physical channels. Read from HW. This
218  * is the number of available channels for this driver, not counting "Secure
219  * mode" allocated physical channels.
220  * @num_log_chans: The number of logical channels. Calculated from
221  * num_phy_chans.
222  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
223  * @dma_slave: dma_device channels that can only do slave transfers.
224  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
225  * @phy_chans: Room for all possible physical channels in system.
226  * @log_chans: Room for all possible logical channels in system.
227  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
228  * to log_chans entries.
229  * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
230  * to phy_chans entries.
231  * @plat_data: Pointer to provided platform_data which is the driver
232  * configuration.
233  * @phy_res: Vector containing all physical channels.
234  * @lcla_pool: lcla pool settings and data.
235  * @lcpa_base: The virtual mapped address of LCPA.
236  * @phy_lcpa: The physical address of the LCPA.
237  * @lcpa_size: The size of the LCPA area.
238  * @desc_slab: cache for descriptors.
239  */
240 struct d40_base {
241         spinlock_t                       interrupt_lock;
242         spinlock_t                       execmd_lock;
243         struct device                    *dev;
244         void __iomem                     *virtbase;
245         struct clk                       *clk;
246         phys_addr_t                       phy_start;
247         resource_size_t                   phy_size;
248         int                               irq;
249         int                               num_phy_chans;
250         int                               num_log_chans;
251         struct dma_device                 dma_both;
252         struct dma_device                 dma_slave;
253         struct dma_device                 dma_memcpy;
254         struct d40_chan                  *phy_chans;
255         struct d40_chan                  *log_chans;
256         struct d40_chan                 **lookup_log_chans;
257         struct d40_chan                 **lookup_phy_chans;
258         struct stedma40_platform_data    *plat_data;
259         /* Physical half channels */
260         struct d40_phy_res               *phy_res;
261         struct d40_lcla_pool              lcla_pool;
262         void                             *lcpa_base;
263         dma_addr_t                        phy_lcpa;
264         resource_size_t                   lcpa_size;
265         struct kmem_cache                *desc_slab;
266 };
267
268 /**
269  * struct d40_interrupt_lookup - lookup table for interrupt handler
270  *
271  * @src: Interrupt mask register.
272  * @clr: Interrupt clear register.
273  * @is_error: true if this is an error interrupt.
274  * @offset: start delta into lookup_log_chans in d40_base. If equal to
275  * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
276  */
277 struct d40_interrupt_lookup {
278         u32 src;
279         u32 clr;
280         bool is_error;
281         int offset;
282 };
283
284 /**
285  * struct d40_reg_val - simple lookup struct
286  *
287  * @reg: The register.
288  * @val: The value that belongs to the register in reg.
289  */
290 struct d40_reg_val {
291         unsigned int reg;
292         unsigned int val;
293 };
294
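/*
 * Allocate LLI space for one descriptor. The common single src/dst pair case
 * uses the embedded pre_alloc_lli area; longer transfers kmalloc an aligned
 * area with GFP_NOWAIT, since this can be called from atomic context.
 */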
295 static int d40_pool_lli_alloc(struct d40_desc *d40d,
296                               int lli_len, bool is_log)
297 {
298         u32 align;
299         void *base;
300
301         if (is_log)
302                 align = sizeof(struct d40_log_lli);
303         else
304                 align = sizeof(struct d40_phy_lli);
305
306         if (lli_len == 1) {
307                 base = d40d->lli_pool.pre_alloc_lli;
308                 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
309                 d40d->lli_pool.base = NULL;
310         } else {
311                 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
312
313                 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
314                 d40d->lli_pool.base = base;
315
316                 if (d40d->lli_pool.base == NULL)
317                         return -ENOMEM;
318         }
319
320         if (is_log) {
321                 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
322                                               align);
323                 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
324                                               align);
325         } else {
326                 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
327                                               align);
328                 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
329                                               align);
330
331                 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
332                 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
333         }
334
335         return 0;
336 }
337
338 static void d40_pool_lli_free(struct d40_desc *d40d)
339 {
340         kfree(d40d->lli_pool.base);
341         d40d->lli_pool.base = NULL;
342         d40d->lli_pool.size = 0;
343         d40d->lli_log.src = NULL;
344         d40d->lli_log.dst = NULL;
345         d40d->lli_phy.src = NULL;
346         d40d->lli_phy.dst = NULL;
347         d40d->lli_phy.src_addr = 0;
348         d40d->lli_phy.dst_addr = 0;
349 }
350
351 static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
352                                       struct d40_desc *desc)
353 {
354         dma_cookie_t cookie = d40c->chan.cookie;
355
356         if (++cookie < 0)
357                 cookie = 1;
358
359         d40c->chan.cookie = cookie;
360         desc->txd.cookie = cookie;
361
362         return cookie;
363 }
364
365 static void d40_desc_remove(struct d40_desc *d40d)
366 {
367         list_del(&d40d->node);
368 }
369
370 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
371 {
372         struct d40_desc *d;
373         struct d40_desc *_d;
374
375         if (!list_empty(&d40c->client)) {
376                 list_for_each_entry_safe(d, _d, &d40c->client, node)
377                         if (async_tx_test_ack(&d->txd)) {
378                                 d40_pool_lli_free(d);
379                                 d40_desc_remove(d);
380                                 break;
381                         }
382         } else {
383                 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
384                 if (d != NULL) {
385                         memset(d, 0, sizeof(struct d40_desc));
386                         INIT_LIST_HEAD(&d->node);
387                 }
388         }
389         return d;
390 }
391
392 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
393 {
394         kmem_cache_free(d40c->base->desc_slab, d40d);
395 }
396
397 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
398 {
399         list_add_tail(&desc->node, &d40c->active);
400 }
401
402 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
403 {
404         struct d40_desc *d;
405
406         if (list_empty(&d40c->active))
407                 return NULL;
408
409         d = list_first_entry(&d40c->active,
410                              struct d40_desc,
411                              node);
412         return d;
413 }
414
415 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
416 {
417         list_add_tail(&desc->node, &d40c->queue);
418 }
419
420 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
421 {
422         struct d40_desc *d;
423
424         if (list_empty(&d40c->queue))
425                 return NULL;
426
427         d = list_first_entry(&d40c->queue,
428                              struct d40_desc,
429                              node);
430         return d;
431 }
432
433 /* Support functions for logical channels */
434
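/*
 * Reserve two free LCLA entries (one for src, one for dst) in the bitmap of
 * the physical channel this logical channel runs on. Does nothing if the
 * channel already holds a pair. Returns -EINVAL if no pair can be reserved;
 * callers then fall back to 1-length transfers that only use LCPA space.
 */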
435 static int d40_lcla_id_get(struct d40_chan *d40c,
436                            struct d40_lcla_pool *pool)
437 {
438         int src_id = 0;
439         int dst_id = 0;
440         struct d40_log_lli *lcla_lidx_base =
441                 pool->base + d40c->phy_chan->num * 1024;
442         int i;
443         int lli_per_log = d40c->base->plat_data->llis_per_log;
444         unsigned long flags;
445
446         if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
447                 return 0;
448
449         if (pool->num_blocks > 32)
450                 return -EINVAL;
451
452         spin_lock_irqsave(&pool->lock, flags);
453
454         for (i = 0; i < pool->num_blocks; i++) {
455                 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
456                         pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
457                         break;
458                 }
459         }
460         src_id = i;
461         if (src_id >= pool->num_blocks)
462                 goto err;
463
464         for (; i < pool->num_blocks; i++) {
465                 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
466                         pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
467                         break;
468                 }
469         }
470
471         dst_id = i;
472         if (dst_id == src_id)
473                 goto err;
474
475         d40c->lcla.src_id = src_id;
476         d40c->lcla.dst_id = dst_id;
477         d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
478         d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
479
480
481         spin_unlock_irqrestore(&pool->lock, flags);
482         return 0;
483 err:
484         spin_unlock_irqrestore(&pool->lock, flags);
485         return -EINVAL;
486 }
487
488 static void d40_lcla_id_put(struct d40_chan *d40c,
489                             struct d40_lcla_pool *pool,
490                             int id)
491 {
492         unsigned long flags;
493         if (id < 0)
494                 return;
495
496         d40c->lcla.src_id = -1;
497         d40c->lcla.dst_id = -1;
498
499         spin_lock_irqsave(&pool->lock, flags);
500         pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
501         spin_unlock_irqrestore(&pool->lock, flags);
502 }
503
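/*
 * Write a command into the channel's 2-bit slot of the shared ACTIVE/ACTIVO
 * register, serialized by execmd_lock. A SUSPEND_REQ is skipped if the
 * channel is already stopped or suspended; otherwise the register is polled
 * for up to D40_SUSPEND_MAX_IT iterations until the channel reports STOP or
 * SUSPENDED.
 */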
504 static int d40_channel_execute_command(struct d40_chan *d40c,
505                                        enum d40_command command)
506 {
507         int status, i;
508         void __iomem *active_reg;
509         int ret = 0;
510         unsigned long flags;
511
512         spin_lock_irqsave(&d40c->base->execmd_lock, flags);
513
514         if (d40c->phy_chan->num % 2 == 0)
515                 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
516         else
517                 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
518
519         if (command == D40_DMA_SUSPEND_REQ) {
520                 status = (readl(active_reg) &
521                           D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
522                         D40_CHAN_POS(d40c->phy_chan->num);
523
524                 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
525                         goto done;
526         }
527
528         writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
529
530         if (command == D40_DMA_SUSPEND_REQ) {
531
532                 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
533                         status = (readl(active_reg) &
534                                   D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
535                                 D40_CHAN_POS(d40c->phy_chan->num);
536
537                         cpu_relax();
538                         /*
539                          * Reduce the number of bus accesses while
540                          * waiting for the DMA to suspend.
541                          */
542                         udelay(3);
543
544                         if (status == D40_DMA_STOP ||
545                             status == D40_DMA_SUSPENDED)
546                                 break;
547                 }
548
549                 if (i == D40_SUSPEND_MAX_IT) {
550                         dev_err(&d40c->chan.dev->device,
551                                 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
552                                 __func__, d40c->phy_chan->num, d40c->log_num,
553                                 status);
554                         dump_stack();
555                         ret = -EBUSY;
556                 }
557
558         }
559 done:
560         spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
561         return ret;
562 }
563
564 static void d40_term_all(struct d40_chan *d40c)
565 {
566         struct d40_desc *d40d;
567
568         /* Release active descriptors */
569         while ((d40d = d40_first_active_get(d40c))) {
570                 d40_desc_remove(d40d);
571
572                 /* Return desc to free-list */
573                 d40_desc_free(d40c, d40d);
574         }
575
576         /* Release queued descriptors waiting for transfer */
577         while ((d40d = d40_first_queued(d40c))) {
578                 d40_desc_remove(d40d);
579
580                 /* Return desc to free-list */
581                 d40_desc_free(d40c, d40d);
582         }
583
584         d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
585                         d40c->lcla.src_id);
586         d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
587                         d40c->lcla.dst_id);
588
589         d40c->pending_tx = 0;
590         d40c->busy = false;
591 }
592
593 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
594 {
595         u32 val;
596         unsigned long flags;
597
598         if (do_enable)
599                 val = D40_ACTIVATE_EVENTLINE;
600         else
601                 val = D40_DEACTIVATE_EVENTLINE;
602
603         spin_lock_irqsave(&d40c->phy_chan->lock, flags);
604
605         /* Enable or disable the event line connected to the device (or memcpy) */
606         if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
607             (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
608                 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
609
610                 writel((val << D40_EVENTLINE_POS(event)) |
611                        ~D40_EVENTLINE_MASK(event),
612                        d40c->base->virtbase + D40_DREG_PCBASE +
613                        d40c->phy_chan->num * D40_DREG_PCDELTA +
614                        D40_CHAN_REG_SSLNK);
615         }
616         if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
617                 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
618
619                 writel((val << D40_EVENTLINE_POS(event)) |
620                        ~D40_EVENTLINE_MASK(event),
621                        d40c->base->virtbase + D40_DREG_PCBASE +
622                        d40c->phy_chan->num * D40_DREG_PCDELTA +
623                        D40_CHAN_REG_SDLNK);
624         }
625
626         spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
627 }
628
629 static u32 d40_chan_has_events(struct d40_chan *d40c)
630 {
631         u32 val = 0;
632
633         /* If SSLNK or SDLNK is zero all events are disabled */
634         if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
635             (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
636                 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
637                             d40c->phy_chan->num * D40_DREG_PCDELTA +
638                             D40_CHAN_REG_SSLNK);
639
640         if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM)
641                 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
642                             d40c->phy_chan->num * D40_DREG_PCDELTA +
643                             D40_CHAN_REG_SDLNK);
644         return val;
645 }
646
647 static void d40_config_enable_lidx(struct d40_chan *d40c)
648 {
649         /* Set LIDX for lcla */
650         writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
651                D40_SREG_ELEM_LOG_LIDX_MASK,
652                d40c->base->virtbase + D40_DREG_PCBASE +
653                d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
654
655         writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
656                D40_SREG_ELEM_LOG_LIDX_MASK,
657                d40c->base->virtbase + D40_DREG_PCBASE +
658                d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
659 }
660
661 static int d40_config_write(struct d40_chan *d40c)
662 {
663         u32 addr_base;
664         u32 var;
665         int res;
666
667         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
668         if (res)
669                 return res;
670
671         /* Odd addresses are even addresses + 4 */
672         addr_base = (d40c->phy_chan->num % 2) * 4;
673         /* Setup channel mode to logical or physical */
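        /* Mode value written below: 1 = physical channel, 2 = logical channel. */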
674         var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
675                 D40_CHAN_POS(d40c->phy_chan->num);
676         writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
677
678         /* Setup operational mode option register */
679         var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
680                0x3) << D40_CHAN_POS(d40c->phy_chan->num);
681
682         writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
683
684         if (d40c->log_num != D40_PHY_CHAN) {
685                 /* Set default config for CFG reg */
686                 writel(d40c->src_def_cfg,
687                        d40c->base->virtbase + D40_DREG_PCBASE +
688                        d40c->phy_chan->num * D40_DREG_PCDELTA +
689                        D40_CHAN_REG_SSCFG);
690                 writel(d40c->dst_def_cfg,
691                        d40c->base->virtbase + D40_DREG_PCBASE +
692                        d40c->phy_chan->num * D40_DREG_PCDELTA +
693                        D40_CHAN_REG_SDCFG);
694
695                 d40_config_enable_lidx(d40c);
696         }
697         return res;
698 }
699
700 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
701 {
702
703         if (d40d->lli_phy.dst && d40d->lli_phy.src) {
704                 d40_phy_lli_write(d40c->base->virtbase,
705                                   d40c->phy_chan->num,
706                                   d40d->lli_phy.dst,
707                                   d40d->lli_phy.src);
708         } else if (d40d->lli_log.dst && d40d->lli_log.src) {
709                 struct d40_log_lli *src = d40d->lli_log.src;
710                 struct d40_log_lli *dst = d40d->lli_log.dst;
711
712                 src += d40d->lli_count;
713                 dst += d40d->lli_count;
714                 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
715                                   d40c->lcla.dst,
716                                   dst, src,
717                                   d40c->base->plat_data->llis_per_log);
718         }
719         d40d->lli_count += d40d->lli_tx_len;
720 }
721
722 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
723 {
724         struct d40_chan *d40c = container_of(tx->chan,
725                                              struct d40_chan,
726                                              chan);
727         struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
728         unsigned long flags;
729
730         spin_lock_irqsave(&d40c->lock, flags);
731
732         tx->cookie = d40_assign_cookie(d40c, d40d);
733
734         d40_desc_queue(d40c, d40d);
735
736         spin_unlock_irqrestore(&d40c->lock, flags);
737
738         return tx->cookie;
739 }
740
741 static int d40_start(struct d40_chan *d40c)
742 {
743         int err;
744
745         if (d40c->log_num != D40_PHY_CHAN) {
746                 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
747                 if (err)
748                         return err;
749                 d40_config_set_event(d40c, true);
750         }
751
752         err = d40_channel_execute_command(d40c, D40_DMA_RUN);
753
754         return err;
755 }
756
757 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
758 {
759         struct d40_desc *d40d;
760         int err;
761
762         /* Start queued jobs, if any */
763         d40d = d40_first_queued(d40c);
764
765         if (d40d != NULL) {
766                 d40c->busy = true;
767
768                 /* Remove from queue */
769                 d40_desc_remove(d40d);
770
771                 /* Add to active queue */
772                 d40_desc_submit(d40c, d40d);
773
774                 /* Initiate DMA job */
775                 d40_desc_load(d40c, d40d);
776
777                 /* Start dma job */
778                 err = d40_start(d40c);
779
780                 if (err)
781                         return NULL;
782         }
783
784         return d40d;
785 }
786
787 /* called from interrupt context */
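/*
 * Terminal count: if the active descriptor still has LLIs left, load the
 * next chunk and restart the channel; otherwise start the next queued job
 * (if any) and let the tasklet complete this descriptor towards the client.
 */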
788 static void dma_tc_handle(struct d40_chan *d40c)
789 {
790         struct d40_desc *d40d;
791
792         if (!d40c->phy_chan)
793                 return;
794
795         /* Get first active entry from list */
796         d40d = d40_first_active_get(d40c);
797
798         if (d40d == NULL)
799                 return;
800
801         if (d40d->lli_count < d40d->lli_len) {
802
803                 d40_desc_load(d40c, d40d);
804                 /* Start dma job */
805                 (void) d40_start(d40c);
806                 return;
807         }
808
809         if (d40_queue_start(d40c) == NULL)
810                 d40c->busy = false;
811
812         d40c->pending_tx++;
813         tasklet_schedule(&d40c->tasklet);
814
815 }
816
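/*
 * Completes the oldest active descriptor: updates the completed cookie,
 * frees the descriptor if the client has acked it (otherwise moves it to the
 * client list), and calls the client callback with the channel lock released.
 */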
817 static void dma_tasklet(unsigned long data)
818 {
819         struct d40_chan *d40c = (struct d40_chan *) data;
820         struct d40_desc *d40d_fin;
821         unsigned long flags;
822         dma_async_tx_callback callback;
823         void *callback_param;
824
825         spin_lock_irqsave(&d40c->lock, flags);
826
827         /* Get first active entry from list */
828         d40d_fin = d40_first_active_get(d40c);
829
830         if (d40d_fin == NULL)
831                 goto err;
832
833         d40c->completed = d40d_fin->txd.cookie;
834
835         /*
836          * If a channel is being terminated, pending_tx is set to zero.
837          * This prevents any finished active jobs from being returned to the client.
838          */
839         if (d40c->pending_tx == 0) {
840                 spin_unlock_irqrestore(&d40c->lock, flags);
841                 return;
842         }
843
844         /* Callback to client */
845         callback = d40d_fin->txd.callback;
846         callback_param = d40d_fin->txd.callback_param;
847
848         if (async_tx_test_ack(&d40d_fin->txd)) {
849                 d40_pool_lli_free(d40d_fin);
850                 d40_desc_remove(d40d_fin);
851                 /* Return desc to free-list */
852                 d40_desc_free(d40c, d40d_fin);
853         } else {
854                 if (!d40d_fin->is_in_client_list) {
855                         d40_desc_remove(d40d_fin);
856                         list_add_tail(&d40d_fin->node, &d40c->client);
857                         d40d_fin->is_in_client_list = true;
858                 }
859         }
860
861         d40c->pending_tx--;
862
863         if (d40c->pending_tx)
864                 tasklet_schedule(&d40c->tasklet);
865
866         spin_unlock_irqrestore(&d40c->lock, flags);
867
868         if (callback)
869                 callback(callback_param);
870
871         return;
872
873  err:
874         /* Rescue maneuver if receiving double interrupts */
875         if (d40c->pending_tx > 0)
876                 d40c->pending_tx--;
877         spin_unlock_irqrestore(&d40c->lock, flags);
878 }
879
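/*
 * The il[] table below maps each interrupt status register to its clear
 * register. Rows with offset == D40_PHY_CHAN resolve through
 * lookup_phy_chans, the others index into lookup_log_chans. All status
 * registers are scanned together as one bitmap.
 */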
880 static irqreturn_t d40_handle_interrupt(int irq, void *data)
881 {
882         static const struct d40_interrupt_lookup il[] = {
883                 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
884                 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
885                 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
886                 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
887                 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
888                 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
889                 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
890                 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
891                 {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
892                 {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
893         };
894
895         int i;
896         u32 regs[ARRAY_SIZE(il)];
897         u32 tmp;
898         u32 idx;
899         u32 row;
900         long chan = -1;
901         struct d40_chan *d40c;
902         unsigned long flags;
903         struct d40_base *base = data;
904
905         spin_lock_irqsave(&base->interrupt_lock, flags);
906
907         /* Read interrupt status of both logical and physical channels */
908         for (i = 0; i < ARRAY_SIZE(il); i++)
909                 regs[i] = readl(base->virtbase + il[i].src);
910
911         for (;;) {
912
913                 chan = find_next_bit((unsigned long *)regs,
914                                      BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
915
916                 /* No more set bits found? */
917                 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
918                         break;
919
920                 row = chan / BITS_PER_LONG;
921                 idx = chan & (BITS_PER_LONG - 1);
922
923                 /* ACK interrupt */
924                 tmp = readl(base->virtbase + il[row].clr);
925                 tmp |= 1 << idx;
926                 writel(tmp, base->virtbase + il[row].clr);
927
928                 if (il[row].offset == D40_PHY_CHAN)
929                         d40c = base->lookup_phy_chans[idx];
930                 else
931                         d40c = base->lookup_log_chans[il[row].offset + idx];
932                 spin_lock(&d40c->lock);
933
934                 if (!il[row].is_error)
935                         dma_tc_handle(d40c);
936                 else
937                         dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
938                                 __func__, chan, il[row].offset, idx);
939
940                 spin_unlock(&d40c->lock);
941         }
942
943         spin_unlock_irqrestore(&base->interrupt_lock, flags);
944
945         return IRQ_HANDLED;
946 }
947
948
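/*
 * Sanity check a client channel configuration: a memory device type may not
 * sit on the peripheral side of the transfer, a logical memory-to-memory
 * channel has no event line, and peripheral to peripheral is rejected even
 * though the hardware supports it.
 */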
949 static int d40_validate_conf(struct d40_chan *d40c,
950                              struct stedma40_chan_cfg *conf)
951 {
952         int res = 0;
953         u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
954         u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
955         bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
956                 == STEDMA40_CHANNEL_IN_LOG_MODE;
957
958         if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
959             dst_event_group == STEDMA40_DEV_DST_MEMORY) {
960                 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
961                         __func__);
962                 res = -EINVAL;
963         }
964
965         if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
966             src_event_group == STEDMA40_DEV_SRC_MEMORY) {
967                 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
968                         __func__);
969                 res = -EINVAL;
970         }
971
972         if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
973             dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
974                 dev_err(&d40c->chan.dev->device,
975                         "[%s] No event line\n", __func__);
976                 res = -EINVAL;
977         }
978
979         if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
980             (src_event_group != dst_event_group)) {
981                 dev_err(&d40c->chan.dev->device,
982                         "[%s] Invalid event group\n", __func__);
983                 res = -EINVAL;
984         }
985
986         if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
987                 /*
988                  * DMAC HW supports it. Will be added to this driver,
989                  * in case any dma client requires it.
990                  */
991                 dev_err(&d40c->chan.dev->device,
992                         "[%s] periph to periph not supported\n",
993                         __func__);
994                 res = -EINVAL;
995         }
996
997         return res;
998 }
999
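/*
 * Try to reserve (part of) a physical channel. allocated_src/allocated_dst
 * hold either D40_ALLOC_FREE, D40_ALLOC_PHY for exclusive physical use, or a
 * bitmask of logical event lines (D40_ALLOC_LOG_FREE when no event is set).
 * Returns true if the requested src or dst half could be reserved.
 */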
1000 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1001                                int log_event_line, bool is_log)
1002 {
1003         unsigned long flags;
1004         spin_lock_irqsave(&phy->lock, flags);
1005         if (!is_log) {
1006                 /* Physical interrupts are masked per physical full channel */
1007                 if (phy->allocated_src == D40_ALLOC_FREE &&
1008                     phy->allocated_dst == D40_ALLOC_FREE) {
1009                         phy->allocated_dst = D40_ALLOC_PHY;
1010                         phy->allocated_src = D40_ALLOC_PHY;
1011                         goto found;
1012                 } else
1013                         goto not_found;
1014         }
1015
1016         /* Logical channel */
1017         if (is_src) {
1018                 if (phy->allocated_src == D40_ALLOC_PHY)
1019                         goto not_found;
1020
1021                 if (phy->allocated_src == D40_ALLOC_FREE)
1022                         phy->allocated_src = D40_ALLOC_LOG_FREE;
1023
1024                 if (!(phy->allocated_src & (1 << log_event_line))) {
1025                         phy->allocated_src |= 1 << log_event_line;
1026                         goto found;
1027                 } else
1028                         goto not_found;
1029         } else {
1030                 if (phy->allocated_dst == D40_ALLOC_PHY)
1031                         goto not_found;
1032
1033                 if (phy->allocated_dst == D40_ALLOC_FREE)
1034                         phy->allocated_dst = D40_ALLOC_LOG_FREE;
1035
1036                 if (!(phy->allocated_dst & (1 << log_event_line))) {
1037                         phy->allocated_dst |= 1 << log_event_line;
1038                         goto found;
1039                 } else
1040                         goto not_found;
1041         }
1042
1043 not_found:
1044         spin_unlock_irqrestore(&phy->lock, flags);
1045         return false;
1046 found:
1047         spin_unlock_irqrestore(&phy->lock, flags);
1048         return true;
1049 }
1050
1051 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1052                                int log_event_line)
1053 {
1054         unsigned long flags;
1055         bool is_free = false;
1056
1057         spin_lock_irqsave(&phy->lock, flags);
1058         if (!log_event_line) {
1059                 /* Physical interrupts are masked per physical full channel */
1060                 phy->allocated_dst = D40_ALLOC_FREE;
1061                 phy->allocated_src = D40_ALLOC_FREE;
1062                 is_free = true;
1063                 goto out;
1064         }
1065
1066         /* Logical channel */
1067         if (is_src) {
1068                 phy->allocated_src &= ~(1 << log_event_line);
1069                 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1070                         phy->allocated_src = D40_ALLOC_FREE;
1071         } else {
1072                 phy->allocated_dst &= ~(1 << log_event_line);
1073                 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1074                         phy->allocated_dst = D40_ALLOC_FREE;
1075         }
1076
1077         is_free = ((phy->allocated_src | phy->allocated_dst) ==
1078                    D40_ALLOC_FREE);
1079
1080 out:
1081         spin_unlock_irqrestore(&phy->lock, flags);
1082
1083         return is_free;
1084 }
1085
1086 static int d40_allocate_channel(struct d40_chan *d40c)
1087 {
1088         int dev_type;
1089         int event_group;
1090         int event_line;
1091         struct d40_phy_res *phys;
1092         int i;
1093         int j;
1094         int log_num;
1095         bool is_src;
1096         bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1097                 == STEDMA40_CHANNEL_IN_LOG_MODE;
1098
1099
1100         phys = d40c->base->phy_res;
1101
1102         if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1103                 dev_type = d40c->dma_cfg.src_dev_type;
1104                 log_num = 2 * dev_type;
1105                 is_src = true;
1106         } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1107                    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1108                 /* dst event lines are used for logical memcpy */
1109                 dev_type = d40c->dma_cfg.dst_dev_type;
1110                 log_num = 2 * dev_type + 1;
1111                 is_src = false;
1112         } else
1113                 return -EINVAL;
1114
1115         event_group = D40_TYPE_TO_GROUP(dev_type);
1116         event_line = D40_TYPE_TO_EVENT(dev_type);
1117
1118         if (!is_log) {
1119                 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1120                         /* Find physical half channel */
1121                         for (i = 0; i < d40c->base->num_phy_chans; i++) {
1122
1123                                 if (d40_alloc_mask_set(&phys[i], is_src,
1124                                                        0, is_log))
1125                                         goto found_phy;
1126                         }
1127                 } else
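                        /*
                         * Physical channels are scanned in blocks of eight;
                         * each event group maps to a pair of channels within
                         * a block, so only that pair is tried here.
                         */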
1128                         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1129                                 int phy_num = j  + event_group * 2;
1130                                 for (i = phy_num; i < phy_num + 2; i++) {
1131                                         if (d40_alloc_mask_set(&phys[i], is_src,
1132                                                                0, is_log))
1133                                                 goto found_phy;
1134                                 }
1135                         }
1136                 return -EINVAL;
1137 found_phy:
1138                 d40c->phy_chan = &phys[i];
1139                 d40c->log_num = D40_PHY_CHAN;
1140                 goto out;
1141         }
1142         if (dev_type == -1)
1143                 return -EINVAL;
1144
1145         /* Find logical channel */
1146         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1147                 int phy_num = j + event_group * 2;
1148                 /*
1149                  * Spread logical channels across all available physical
1150                  * channels rather than packing every logical channel onto
1151                  * the first available physical channel.
1152                  */
1153                 if (is_src) {
1154                         for (i = phy_num; i < phy_num + 2; i++) {
1155                                 if (d40_alloc_mask_set(&phys[i], is_src,
1156                                                        event_line, is_log))
1157                                         goto found_log;
1158                         }
1159                 } else {
1160                         for (i = phy_num + 1; i >= phy_num; i--) {
1161                                 if (d40_alloc_mask_set(&phys[i], is_src,
1162                                                        event_line, is_log))
1163                                         goto found_log;
1164                         }
1165                 }
1166         }
1167         return -EINVAL;
1168
1169 found_log:
1170         d40c->phy_chan = &phys[i];
1171         d40c->log_num = log_num;
1172 out:
1173
1174         if (is_log)
1175                 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1176         else
1177                 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1178
1179         return 0;
1180
1181 }
1182
1183 static int d40_config_memcpy(struct d40_chan *d40c)
1184 {
1185         dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1186
1187         if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1188                 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1189                 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1190                 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1191                         memcpy[d40c->chan.chan_id];
1192
1193         } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1194                    dma_has_cap(DMA_SLAVE, cap)) {
1195                 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1196         } else {
1197                 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1198                         __func__);
1199                 return -EINVAL;
1200         }
1201
1202         return 0;
1203 }
1204
1205
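/*
 * Release a channel completely: terminate all jobs, free client owned
 * descriptors, suspend the physical channel and then release either the
 * event line (logical channel) or the whole physical channel. The physical
 * channel is only stopped once no other logical channel still uses it.
 */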
1206 static int d40_free_dma(struct d40_chan *d40c)
1207 {
1208
1209         int res = 0;
1210         u32 event, dir;
1211         struct d40_phy_res *phy = d40c->phy_chan;
1212         bool is_src;
1213         struct d40_desc *d;
1214         struct d40_desc *_d;
1215
1216
1217         /* Terminate all queued and active transfers */
1218         d40_term_all(d40c);
1219
1220         /* Release client owned descriptors */
1221         if (!list_empty(&d40c->client))
1222                 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1223                         d40_pool_lli_free(d);
1224                         d40_desc_remove(d);
1225                         /* Return desc to free-list */
1226                         d40_desc_free(d40c, d);
1227                 }
1228
1229         if (phy == NULL) {
1230                 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1231                         __func__);
1232                 return -EINVAL;
1233         }
1234
1235         if (phy->allocated_src == D40_ALLOC_FREE &&
1236             phy->allocated_dst == D40_ALLOC_FREE) {
1237                 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1238                         __func__);
1239                 return -EINVAL;
1240         }
1241
1242         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1243         if (res) {
1244                 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1245                         __func__);
1246                 return res;
1247         }
1248
1249         if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1250             d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1251                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1252                 dir = D40_CHAN_REG_SDLNK;
1253                 is_src = false;
1254         } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1255                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1256                 dir = D40_CHAN_REG_SSLNK;
1257                 is_src = true;
1258         } else {
1259                 dev_err(&d40c->chan.dev->device,
1260                         "[%s] Unknown direction\n", __func__);
1261                 return -EINVAL;
1262         }
1263
1264         if (d40c->log_num != D40_PHY_CHAN) {
1265                 /*
1266                  * Release the logical channel; deactivate the event line while
1267                  * the physical resource is suspended.
1268                  */
1269                 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1270                        D40_EVENTLINE_MASK(event),
1271                        d40c->base->virtbase + D40_DREG_PCBASE +
1272                        phy->num * D40_DREG_PCDELTA + dir);
1273
1274                 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1275
1276                 /*
1277                  * Check if there are more logical allocations
1278                  * on this phy channel.
1279                  */
1280                 if (!d40_alloc_mask_free(phy, is_src, event)) {
1281                         /* Resume the other logical channels if any */
1282                         if (d40_chan_has_events(d40c)) {
1283                                 res = d40_channel_execute_command(d40c,
1284                                                                   D40_DMA_RUN);
1285                                 if (res) {
1286                                         dev_err(&d40c->chan.dev->device,
1287                                                 "[%s] Executing RUN command\n",
1288                                                 __func__);
1289                                         return res;
1290                                 }
1291                         }
1292                         return 0;
1293                 }
1294         } else
1295                 d40_alloc_mask_free(phy, is_src, 0);
1296
1297         /* Release physical channel */
1298         res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1299         if (res) {
1300                 dev_err(&d40c->chan.dev->device,
1301                         "[%s] Failed to stop channel\n", __func__);
1302                 return res;
1303         }
1304         d40c->phy_chan = NULL;
1305         /* Invalidate channel type */
1306         d40c->dma_cfg.channel_type = 0;
1307         d40c->base->lookup_phy_chans[phy->num] = NULL;
1308
1309         return 0;
1310 }
1311
1312 static int d40_pause(struct dma_chan *chan)
1313 {
1314         struct d40_chan *d40c =
1315                 container_of(chan, struct d40_chan, chan);
1316         int res;
1317         unsigned long flags;
1318
1319         spin_lock_irqsave(&d40c->lock, flags);
1320
1321         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1322         if (res == 0) {
1323                 if (d40c->log_num != D40_PHY_CHAN) {
1324                         d40_config_set_event(d40c, false);
1325                         /* Resume the other logical channels if any */
1326                         if (d40_chan_has_events(d40c))
1327                                 res = d40_channel_execute_command(d40c,
1328                                                                   D40_DMA_RUN);
1329                 }
1330         }
1331
1332         spin_unlock_irqrestore(&d40c->lock, flags);
1333         return res;
1334 }
1335
1336 static bool d40_is_paused(struct d40_chan *d40c)
1337 {
1338         bool is_paused = false;
1339         unsigned long flags;
1340         void __iomem *active_reg;
1341         u32 status;
1342         u32 event;
1343         int res;
1344
1345         spin_lock_irqsave(&d40c->lock, flags);
1346
1347         if (d40c->log_num == D40_PHY_CHAN) {
1348                 if (d40c->phy_chan->num % 2 == 0)
1349                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1350                 else
1351                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1352
1353                 status = (readl(active_reg) &
1354                           D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1355                         D40_CHAN_POS(d40c->phy_chan->num);
1356                 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1357                         is_paused = true;
1358
1359                 goto _exit;
1360         }
1361
1362         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1363         if (res != 0)
1364                 goto _exit;
1365
1366         if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1367             d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1368                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1369         else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1370                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1371         else {
1372                 dev_err(&d40c->chan.dev->device,
1373                         "[%s] Unknown direction\n", __func__);
1374                 goto _exit;
1375         }
1376         status = d40_chan_has_events(d40c);
1377         status = (status & D40_EVENTLINE_MASK(event)) >>
1378                 D40_EVENTLINE_POS(event);
1379
1380         if (status != D40_DMA_RUN)
1381                 is_paused = true;
1382
1383         /* Resume the other logical channels if any */
1384         if (d40_chan_has_events(d40c))
1385                 res = d40_channel_execute_command(d40c,
1386                                                   D40_DMA_RUN);
1387
1388 _exit:
1389         spin_unlock_irqrestore(&d40c->lock, flags);
1390         return is_paused;
1391
1392 }
1393
1394
1395 static bool d40_tx_is_linked(struct d40_chan *d40c)
1396 {
1397         bool is_link;
1398
1399         if (d40c->log_num != D40_PHY_CHAN)
1400                 is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1401         else
1402                 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1403                                 d40c->phy_chan->num * D40_DREG_PCDELTA +
1404                                 D40_CHAN_REG_SDLNK) &
1405                         D40_SREG_LNK_PHYS_LNK_MASK;
1406         return is_link;
1407 }
1408
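/*
 * Remaining bytes of the current transfer: the element count is read from
 * LCPA (logical channel) or the SDELT register (physical channel) and
 * multiplied by the destination element size, 1 << data_width bytes.
 */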
1409 static u32 d40_residue(struct d40_chan *d40c)
1410 {
1411         u32 num_elt;
1412
1413         if (d40c->log_num != D40_PHY_CHAN)
1414                 num_elt = (readl(&d40c->lcpa->lcsp2) &  D40_MEM_LCSP2_ECNT_MASK)
1415                         >> D40_MEM_LCSP2_ECNT_POS;
1416         else
1417                 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1418                                  d40c->phy_chan->num * D40_DREG_PCDELTA +
1419                                  D40_CHAN_REG_SDELT) &
1420                            D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1421         return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1422 }
1423
1424 static int d40_resume(struct dma_chan *chan)
1425 {
1426         struct d40_chan *d40c =
1427                 container_of(chan, struct d40_chan, chan);
1428         int res = 0;
1429         unsigned long flags;
1430
1431         spin_lock_irqsave(&d40c->lock, flags);
1432
1433         if (d40c->log_num != D40_PHY_CHAN) {
1434                 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1435                 if (res)
1436                         goto out;
1437
1438                 /* If bytes are left to transfer or the tx is linked, resume the job */
1439                 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1440                         d40_config_set_event(d40c, true);
1441                         res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1442                 }
1443         } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1444                 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1445
1446 out:
1447         spin_unlock_irqrestore(&d40c->lock, flags);
1448         return res;
1449 }
1450
1451 static u32 stedma40_residue(struct dma_chan *chan)
1452 {
1453         struct d40_chan *d40c =
1454                 container_of(chan, struct d40_chan, chan);
1455         u32 bytes_left;
1456         unsigned long flags;
1457
1458         spin_lock_irqsave(&d40c->lock, flags);
1459         bytes_left = d40_residue(d40c);
1460         spin_unlock_irqrestore(&d40c->lock, flags);
1461
1462         return bytes_left;
1463 }
1464
1465 /* Public DMA functions in addition to the DMA engine framework */
1466
1467 int stedma40_set_psize(struct dma_chan *chan,
1468                        int src_psize,
1469                        int dst_psize)
1470 {
1471         struct d40_chan *d40c =
1472                 container_of(chan, struct d40_chan, chan);
1473         unsigned long flags;
1474
1475         spin_lock_irqsave(&d40c->lock, flags);
1476
1477         if (d40c->log_num != D40_PHY_CHAN) {
1478                 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1479                 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1480                 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1481                 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1482                 goto out;
1483         }
1484
1485         if (src_psize == STEDMA40_PSIZE_PHY_1)
1486                 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1487         else {
1488                 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1489                 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1490                                        D40_SREG_CFG_PSIZE_POS);
1491                 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1492         }
1493
1494         if (dst_psize == STEDMA40_PSIZE_PHY_1)
1495                 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1496         else {
1497                 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1498                 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1499                                        D40_SREG_CFG_PSIZE_POS);
1500                 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1501         }
1502 out:
1503         spin_unlock_irqrestore(&d40c->lock, flags);
1504         return 0;
1505 }
1506 EXPORT_SYMBOL(stedma40_set_psize);
1507
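/*
 * stedma40_memcpy_sg - prepare a scatter-gather memcpy job.
 *
 * Builds logical or physical LLIs for the src and dst scatterlists on an
 * already allocated channel. Returns an initialized descriptor, an ERR_PTR
 * if the channel is unallocated, or NULL if descriptor/LLI allocation fails.
 *
 * A client would typically submit the returned descriptor along these lines
 * (sketch only, error handling omitted; chan, dst_sg, src_sg and len are
 * illustrative client variables):
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = stedma40_memcpy_sg(chan, dst_sg, src_sg, len,
 *				  DMA_PREP_INTERRUPT);
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 */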
1508 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1509                                                    struct scatterlist *sgl_dst,
1510                                                    struct scatterlist *sgl_src,
1511                                                    unsigned int sgl_len,
1512                                                    unsigned long dma_flags)
1513 {
1514         int res;
1515         struct d40_desc *d40d;
1516         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1517                                              chan);
1518         unsigned long flags;
1519
1520         if (d40c->phy_chan == NULL) {
1521                 dev_err(&d40c->chan.dev->device,
1522                         "[%s] Unallocated channel.\n", __func__);
1523                 return ERR_PTR(-EINVAL);
1524         }
1525
1526         spin_lock_irqsave(&d40c->lock, flags);
1527         d40d = d40_desc_get(d40c);
1528
1529         if (d40d == NULL)
1530                 goto err;
1531
1532         d40d->lli_len = sgl_len;
1533         d40d->lli_tx_len = d40d->lli_len;
1534         d40d->txd.flags = dma_flags;
1535
1536         if (d40c->log_num != D40_PHY_CHAN) {
1537                 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1538                         d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1539
1540                 if (sgl_len > 1)
1541                         /*
1542                          * Check if there is space available in lcla. If not,
1543                          * split list into 1-length and run only in lcpa
1544                          * space.
1545                          */
1546                         if (d40_lcla_id_get(d40c,
1547                                             &d40c->base->lcla_pool) != 0)
1548                                 d40d->lli_tx_len = 1;
1549
1550                 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1551                         dev_err(&d40c->chan.dev->device,
1552                                 "[%s] Out of memory\n", __func__);
1553                         goto err;
1554                 }
1555
1556                 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1557                                          sgl_src,
1558                                          sgl_len,
1559                                          d40d->lli_log.src,
1560                                          d40c->log_def.lcsp1,
1561                                          d40c->dma_cfg.src_info.data_width,
1562                                          dma_flags & DMA_PREP_INTERRUPT,
1563                                          d40d->lli_tx_len,
1564                                          d40c->base->plat_data->llis_per_log);
1565
1566                 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1567                                          sgl_dst,
1568                                          sgl_len,
1569                                          d40d->lli_log.dst,
1570                                          d40c->log_def.lcsp3,
1571                                          d40c->dma_cfg.dst_info.data_width,
1572                                          dma_flags & DMA_PREP_INTERRUPT,
1573                                          d40d->lli_tx_len,
1574                                          d40c->base->plat_data->llis_per_log);
1575
1576
1577         } else {
1578                 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1579                         dev_err(&d40c->chan.dev->device,
1580                                 "[%s] Out of memory\n", __func__);
1581                         goto err;
1582                 }
1583
1584                 res = d40_phy_sg_to_lli(sgl_src,
1585                                         sgl_len,
1586                                         0,
1587                                         d40d->lli_phy.src,
1588                                         d40d->lli_phy.src_addr,
1589                                         d40c->src_def_cfg,
1590                                         d40c->dma_cfg.src_info.data_width,
1591                                         d40c->dma_cfg.src_info.psize,
1592                                         true);
1593
1594                 if (res < 0)
1595                         goto err;
1596
1597                 res = d40_phy_sg_to_lli(sgl_dst,
1598                                         sgl_len,
1599                                         0,
1600                                         d40d->lli_phy.dst,
1601                                         d40d->lli_phy.dst_addr,
1602                                         d40c->dst_def_cfg,
1603                                         d40c->dma_cfg.dst_info.data_width,
1604                                         d40c->dma_cfg.dst_info.psize,
1605                                         true);
1606
1607                 if (res < 0)
1608                         goto err;
1609
1610                 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1611                                       d40d->lli_pool.size, DMA_TO_DEVICE);
1612         }
1613
1614         dma_async_tx_descriptor_init(&d40d->txd, chan);
1615
1616         d40d->txd.tx_submit = d40_tx_submit;
1617
1618         spin_unlock_irqrestore(&d40c->lock, flags);
1619
1620         return &d40d->txd;
1621 err:
1622         spin_unlock_irqrestore(&d40c->lock, flags);
1623         return NULL;
1624 }
1625 EXPORT_SYMBOL(stedma40_memcpy_sg);
1626
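/*
 * stedma40_filter - channel filter function for dma_request_channel().
 *
 * When a struct stedma40_chan_cfg is passed as the filter parameter it is
 * validated and stored on the channel; a NULL parameter configures the
 * channel for memcpy. Returns true if the channel can be used.
 *
 * Sketch of client usage (the contents of cfg depend on the client):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct stedma40_chan_cfg cfg = { ... };
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */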
1627 bool stedma40_filter(struct dma_chan *chan, void *data)
1628 {
1629         struct stedma40_chan_cfg *info = data;
1630         struct d40_chan *d40c =
1631                 container_of(chan, struct d40_chan, chan);
1632         int err;
1633
1634         if (data) {
1635                 err = d40_validate_conf(d40c, info);
1636                 if (!err)
1637                         d40c->dma_cfg = *info;
1638         } else
1639                 err = d40_config_memcpy(d40c);
1640
1641         return err == 0;
1642 }
1643 EXPORT_SYMBOL(stedma40_filter);
1644
1645 /* DMA ENGINE functions */
1646 static int d40_alloc_chan_resources(struct dma_chan *chan)
1647 {
1648         int err;
1649         unsigned long flags;
1650         struct d40_chan *d40c =
1651                 container_of(chan, struct d40_chan, chan);
1652         bool is_free_phy;
1653         spin_lock_irqsave(&d40c->lock, flags);
1654
1655         d40c->completed = chan->cookie = 1;
1656
1657         /*
1658          * If no dma configuration is set (channel_type == 0)
1659          * use default configuration (memcpy)
1660          */
1661         if (d40c->dma_cfg.channel_type == 0) {
1662                 err = d40_config_memcpy(d40c);
1663                 if (err) {
1664                         dev_err(&d40c->chan.dev->device,
1665                                 "[%s] Failed to configure memcpy channel\n",
1666                                 __func__);
1667                         goto fail;
1668                 }
1669         }
1670         is_free_phy = (d40c->phy_chan == NULL);
1671
1672         err = d40_allocate_channel(d40c);
1673         if (err) {
1674                 dev_err(&d40c->chan.dev->device,
1675                         "[%s] Failed to allocate channel\n", __func__);
1676                 goto fail;
1677         }
1678
1679         /* Fill in basic CFG register values */
1680         d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1681                     &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1682
1683         if (d40c->log_num != D40_PHY_CHAN) {
1684                 d40_log_cfg(&d40c->dma_cfg,
1685                             &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1686
1687                 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1688                         d40c->lcpa = d40c->base->lcpa_base +
1689                           d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1690                 else
1691                         d40c->lcpa = d40c->base->lcpa_base +
1692                           d40c->dma_cfg.dst_dev_type *
1693                           D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1694         }
1695
1696         /*
1697          * Only write channel configuration to the DMA if the physical
1698          * resource is free. In case of multiple logical channels
1699          * on the same physical resource, only the first write is necessary.
1700          */
1701         if (is_free_phy) {
1702                 err = d40_config_write(d40c);
1703                 if (err) {
1704                         dev_err(&d40c->chan.dev->device,
1705                                 "[%s] Failed to configure channel\n",
1706                                 __func__);
1707                 }
1708         }
1709 fail:
1710         spin_unlock_irqrestore(&d40c->lock, flags);
1711         return err;
1712 }
1713
1714 static void d40_free_chan_resources(struct dma_chan *chan)
1715 {
1716         struct d40_chan *d40c =
1717                 container_of(chan, struct d40_chan, chan);
1718         int err;
1719         unsigned long flags;
1720
1721         if (d40c->phy_chan == NULL) {
1722                 dev_err(&d40c->chan.dev->device,
1723                         "[%s] Cannot free unallocated channel\n", __func__);
1724                 return;
1725         }
1726
1727
1728         spin_lock_irqsave(&d40c->lock, flags);
1729
1730         err = d40_free_dma(d40c);
1731
1732         if (err)
1733                 dev_err(&d40c->chan.dev->device,
1734                         "[%s] Failed to free channel\n", __func__);
1735         spin_unlock_irqrestore(&d40c->lock, flags);
1736 }
1737
1738 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1739                                                        dma_addr_t dst,
1740                                                        dma_addr_t src,
1741                                                        size_t size,
1742                                                        unsigned long dma_flags)
1743 {
1744         struct d40_desc *d40d;
1745         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1746                                              chan);
1747         unsigned long flags;
1748         int err = 0;
1749
1750         if (d40c->phy_chan == NULL) {
1751                 dev_err(&d40c->chan.dev->device,
1752                         "[%s] Channel is not allocated.\n", __func__);
1753                 return ERR_PTR(-EINVAL);
1754         }
1755
1756         spin_lock_irqsave(&d40c->lock, flags);
1757         d40d = d40_desc_get(d40c);
1758
1759         if (d40d == NULL) {
1760                 dev_err(&d40c->chan.dev->device,
1761                         "[%s] Descriptor is NULL\n", __func__);
1762                 goto err;
1763         }
1764
1765         d40d->txd.flags = dma_flags;
1766
1767         dma_async_tx_descriptor_init(&d40d->txd, chan);
1768
1769         d40d->txd.tx_submit = d40_tx_submit;
1770
1771         if (d40c->log_num != D40_PHY_CHAN) {
1772
1773                 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1774                         dev_err(&d40c->chan.dev->device,
1775                                 "[%s] Out of memory\n", __func__);
1776                         goto err;
1777                 }
1778                 d40d->lli_len = 1;
1779                 d40d->lli_tx_len = 1;
1780
1781                 d40_log_fill_lli(d40d->lli_log.src,
1782                                  src,
1783                                  size,
1784                                  0,
1785                                  d40c->log_def.lcsp1,
1786                                  d40c->dma_cfg.src_info.data_width,
1787                                  true, true);
1788
1789                 d40_log_fill_lli(d40d->lli_log.dst,
1790                                  dst,
1791                                  size,
1792                                  0,
1793                                  d40c->log_def.lcsp3,
1794                                  d40c->dma_cfg.dst_info.data_width,
1795                                  true, true);
1796
1797         } else {
1798
1799                 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1800                         dev_err(&d40c->chan.dev->device,
1801                                 "[%s] Out of memory\n", __func__);
1802                         goto err;
1803                 }
1804
1805                 err = d40_phy_fill_lli(d40d->lli_phy.src,
1806                                        src,
1807                                        size,
1808                                        d40c->dma_cfg.src_info.psize,
1809                                        0,
1810                                        d40c->src_def_cfg,
1811                                        true,
1812                                        d40c->dma_cfg.src_info.data_width,
1813                                        false);
1814                 if (err)
1815                         goto err_fill_lli;
1816
1817                 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1818                                        dst,
1819                                        size,
1820                                        d40c->dma_cfg.dst_info.psize,
1821                                        0,
1822                                        d40c->dst_def_cfg,
1823                                        true,
1824                                        d40c->dma_cfg.dst_info.data_width,
1825                                        false);
1826
1827                 if (err)
1828                         goto err_fill_lli;
1829
1830                 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1831                                       d40d->lli_pool.size, DMA_TO_DEVICE);
1832         }
1833
1834         spin_unlock_irqrestore(&d40c->lock, flags);
1835         return &d40d->txd;
1836
1837 err_fill_lli:
1838         dev_err(&d40c->chan.dev->device,
1839                 "[%s] Failed filling in PHY LLI\n", __func__);
1840         d40_pool_lli_free(d40d);
1841 err:
1842         spin_unlock_irqrestore(&d40c->lock, flags);
1843         return NULL;
1844 }
1845
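/*
 * d40_prep_slave_sg_log - prepare a slave sg job on a logical channel.
 *
 * Allocates the LLI pool, limits the number of LLIs per transfer to what
 * fits in LCLA (falling back to one LLI at a time in LCPA only), looks up
 * the device address from platform data based on the direction and converts
 * the scatterlist into logical LLIs.
 */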
1846 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1847                                  struct d40_chan *d40c,
1848                                  struct scatterlist *sgl,
1849                                  unsigned int sg_len,
1850                                  enum dma_data_direction direction,
1851                                  unsigned long dma_flags)
1852 {
1853         dma_addr_t dev_addr = 0;
1854         int total_size;
1855
1856         if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1857                 dev_err(&d40c->chan.dev->device,
1858                         "[%s] Out of memory\n", __func__);
1859                 return -ENOMEM;
1860         }
1861
1862         d40d->lli_len = sg_len;
1863         if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1864                 d40d->lli_tx_len = d40d->lli_len;
1865         else
1866                 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1867
1868         if (sg_len > 1)
1869                 /*
1870                  * Check if there is space available in lcla.
1871                  * If not, split list into 1-length and run only
1872                  * in lcpa space.
1873                  */
1874                 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1875                         d40d->lli_tx_len = 1;
1876
1877         if (direction == DMA_FROM_DEVICE)
1878                 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1879         else if (direction == DMA_TO_DEVICE)
1880                 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1881         else
1882                 return -EINVAL;
1883
1884         total_size = d40_log_sg_to_dev(&d40c->lcla,
1885                                        sgl, sg_len,
1886                                        &d40d->lli_log,
1887                                        &d40c->log_def,
1888                                        d40c->dma_cfg.src_info.data_width,
1889                                        d40c->dma_cfg.dst_info.data_width,
1890                                        direction,
1891                                        dma_flags & DMA_PREP_INTERRUPT,
1892                                        dev_addr, d40d->lli_tx_len,
1893                                        d40c->base->plat_data->llis_per_log);
1894
1895         if (total_size < 0)
1896                 return -EINVAL;
1897
1898         return 0;
1899 }
1900
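/*
 * d40_prep_slave_sg_phy - prepare a slave sg job on a physical channel.
 *
 * Allocates the LLI pool, looks up the device address from platform data
 * based on the direction, converts the scatterlist into physical LLIs for
 * both src and dst and maps the LLI area for the DMAC.
 */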
1901 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1902                                  struct d40_chan *d40c,
1903                                  struct scatterlist *sgl,
1904                                  unsigned int sgl_len,
1905                                  enum dma_data_direction direction,
1906                                  unsigned long dma_flags)
1907 {
1908         dma_addr_t src_dev_addr;
1909         dma_addr_t dst_dev_addr;
1910         int res;
1911
1912         if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1913                 dev_err(&d40c->chan.dev->device,
1914                         "[%s] Out of memory\n", __func__);
1915                 return -ENOMEM;
1916         }
1917
1918         d40d->lli_len = sgl_len;
1919         d40d->lli_tx_len = sgl_len;
1920
1921         if (direction == DMA_FROM_DEVICE) {
1922                 dst_dev_addr = 0;
1923                 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1924         } else if (direction == DMA_TO_DEVICE) {
1925                 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1926                 src_dev_addr = 0;
1927         } else
1928                 return -EINVAL;
1929
1930         res = d40_phy_sg_to_lli(sgl,
1931                                 sgl_len,
1932                                 src_dev_addr,
1933                                 d40d->lli_phy.src,
1934                                 d40d->lli_phy.src_addr,
1935                                 d40c->src_def_cfg,
1936                                 d40c->dma_cfg.src_info.data_width,
1937                                 d40c->dma_cfg.src_info.psize,
1938                                 true);
1939         if (res < 0)
1940                 return res;
1941
1942         res = d40_phy_sg_to_lli(sgl,
1943                                 sgl_len,
1944                                 dst_dev_addr,
1945                                 d40d->lli_phy.dst,
1946                                 d40d->lli_phy.dst_addr,
1947                                 d40c->dst_def_cfg,
1948                                 d40c->dma_cfg.dst_info.data_width,
1949                                 d40c->dma_cfg.dst_info.psize,
1950                                  true);
1951         if (res < 0)
1952                 return res;
1953
1954         (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1955                               d40d->lli_pool.size, DMA_TO_DEVICE);
1956         return 0;
1957 }
1958
1959 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1960                                                          struct scatterlist *sgl,
1961                                                          unsigned int sg_len,
1962                                                          enum dma_data_direction direction,
1963                                                          unsigned long dma_flags)
1964 {
1965         struct d40_desc *d40d;
1966         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1967                                              chan);
1968         unsigned long flags;
1969         int err;
1970
1971         if (d40c->phy_chan == NULL) {
1972                 dev_err(&d40c->chan.dev->device,
1973                         "[%s] Cannot prepare unallocated channel\n", __func__);
1974                 return ERR_PTR(-EINVAL);
1975         }
1976
1977         if (d40c->dma_cfg.pre_transfer)
1978                 d40c->dma_cfg.pre_transfer(chan,
1979                                            d40c->dma_cfg.pre_transfer_data,
1980                                            sg_dma_len(sgl));
1981
1982         spin_lock_irqsave(&d40c->lock, flags);
1983         d40d = d40_desc_get(d40c);
1984         spin_unlock_irqrestore(&d40c->lock, flags);
1985
1986         if (d40d == NULL)
1987                 return NULL;
1988
1989         if (d40c->log_num != D40_PHY_CHAN)
1990                 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
1991                                             direction, dma_flags);
1992         else
1993                 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
1994                                             direction, dma_flags);
1995         if (err) {
1996                 dev_err(&d40c->chan.dev->device,
1997                         "[%s] Failed to prepare %s slave sg job: %d\n",
1998                         __func__,
1999                         d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2000                 return NULL;
2001         }
2002
2003         d40d->txd.flags = dma_flags;
2004
2005         dma_async_tx_descriptor_init(&d40d->txd, chan);
2006
2007         d40d->txd.tx_submit = d40_tx_submit;
2008
2009         return &d40d->txd;
2010 }
2011
2012 static enum dma_status d40_tx_status(struct dma_chan *chan,
2013                                      dma_cookie_t cookie,
2014                                      struct dma_tx_state *txstate)
2015 {
2016         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2017         dma_cookie_t last_used;
2018         dma_cookie_t last_complete;
2019         int ret;
2020
2021         if (d40c->phy_chan == NULL) {
2022                 dev_err(&d40c->chan.dev->device,
2023                         "[%s] Cannot read status of unallocated channel\n",
2024                         __func__);
2025                 return -EINVAL;
2026         }
2027
2028         last_complete = d40c->completed;
2029         last_used = chan->cookie;
2030
2031         if (d40_is_paused(d40c))
2032                 ret = DMA_PAUSED;
2033         else
2034                 ret = dma_async_is_complete(cookie, last_complete, last_used);
2035
2036         dma_set_tx_state(txstate, last_complete, last_used,
2037                          stedma40_residue(chan));
2038
2039         return ret;
2040 }
2041
2042 static void d40_issue_pending(struct dma_chan *chan)
2043 {
2044         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2045         unsigned long flags;
2046
2047         if (d40c->phy_chan == NULL) {
2048                 dev_err(&d40c->chan.dev->device,
2049                         "[%s] Channel is not allocated!\n", __func__);
2050                 return;
2051         }
2052
2053         spin_lock_irqsave(&d40c->lock, flags);
2054
2055         /* Busy means that pending jobs are already being processed */
2056         if (!d40c->busy)
2057                 (void) d40_queue_start(d40c);
2058
2059         spin_unlock_irqrestore(&d40c->lock, flags);
2060 }
2061
2062 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2063                        unsigned long arg)
2064 {
2065         unsigned long flags;
2066         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2067
2068         if (d40c->phy_chan == NULL) {
2069                 dev_err(&d40c->chan.dev->device,
2070                         "[%s] Channel is not allocated!\n", __func__);
2071                 return -EINVAL;
2072         }
2073
2074         switch (cmd) {
2075         case DMA_TERMINATE_ALL:
2076                 spin_lock_irqsave(&d40c->lock, flags);
2077                 d40_term_all(d40c);
2078                 spin_unlock_irqrestore(&d40c->lock, flags);
2079                 return 0;
2080         case DMA_PAUSE:
2081                 return d40_pause(chan);
2082         case DMA_RESUME:
2083                 return d40_resume(chan);
2084         }
2085
2086         /* Other commands are unimplemented */
2087         return -ENXIO;
2088 }
2089
2090 /* Initialization functions */
2091
2092 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2093                                  struct d40_chan *chans, int offset,
2094                                  int num_chans)
2095 {
2096         int i = 0;
2097         struct d40_chan *d40c;
2098
2099         INIT_LIST_HEAD(&dma->channels);
2100
2101         for (i = offset; i < offset + num_chans; i++) {
2102                 d40c = &chans[i];
2103                 d40c->base = base;
2104                 d40c->chan.device = dma;
2105
2106                 /* Invalidate lcla element */
2107                 d40c->lcla.src_id = -1;
2108                 d40c->lcla.dst_id = -1;
2109
2110                 spin_lock_init(&d40c->lock);
2111
2112                 d40c->log_num = D40_PHY_CHAN;
2113
2114                 INIT_LIST_HEAD(&d40c->active);
2115                 INIT_LIST_HEAD(&d40c->queue);
2116                 INIT_LIST_HEAD(&d40c->client);
2117
2118                 tasklet_init(&d40c->tasklet, dma_tasklet,
2119                              (unsigned long) d40c);
2120
2121                 list_add_tail(&d40c->chan.device_node,
2122                               &dma->channels);
2123         }
2124 }
2125
2126 static int __init d40_dmaengine_init(struct d40_base *base,
2127                                      int num_reserved_chans)
2128 {
2129         int err;
2130
2131         d40_chan_init(base, &base->dma_slave, base->log_chans,
2132                       0, base->num_log_chans);
2133
2134         dma_cap_zero(base->dma_slave.cap_mask);
2135         dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2136
2137         base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2138         base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2139         base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2140         base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2141         base->dma_slave.device_tx_status = d40_tx_status;
2142         base->dma_slave.device_issue_pending = d40_issue_pending;
2143         base->dma_slave.device_control = d40_control;
2144         base->dma_slave.dev = base->dev;
2145
2146         err = dma_async_device_register(&base->dma_slave);
2147
2148         if (err) {
2149                 dev_err(base->dev,
2150                         "[%s] Failed to register slave channels\n",
2151                         __func__);
2152                 goto failure1;
2153         }
2154
2155         d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2156                       base->num_log_chans, base->plat_data->memcpy_len);
2157
2158         dma_cap_zero(base->dma_memcpy.cap_mask);
2159         dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2160
2161         base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2162         base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2163         base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2164         base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2165         base->dma_memcpy.device_tx_status = d40_tx_status;
2166         base->dma_memcpy.device_issue_pending = d40_issue_pending;
2167         base->dma_memcpy.device_control = d40_control;
2168         base->dma_memcpy.dev = base->dev;
2169         /*
2170          * This controller can only access addresses at even
2171          * 32-bit boundaries, i.e. 2^2 byte alignment.
2172          */
2173         base->dma_memcpy.copy_align = 2;
2174
2175         err = dma_async_device_register(&base->dma_memcpy);
2176
2177         if (err) {
2178                 dev_err(base->dev,
2179                         "[%s] Failed to register memcpy only channels\n",
2180                         __func__);
2181                 goto failure2;
2182         }
2183
2184         d40_chan_init(base, &base->dma_both, base->phy_chans,
2185                       0, num_reserved_chans);
2186
2187         dma_cap_zero(base->dma_both.cap_mask);
2188         dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2189         dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2190
2191         base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2192         base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2193         base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2194         base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2195         base->dma_both.device_tx_status = d40_tx_status;
2196         base->dma_both.device_issue_pending = d40_issue_pending;
2197         base->dma_both.device_control = d40_control;
2198         base->dma_both.dev = base->dev;
2199         base->dma_both.copy_align = 2;
2200         err = dma_async_device_register(&base->dma_both);
2201
2202         if (err) {
2203                 dev_err(base->dev,
2204                         "[%s] Failed to register logical and physical capable channels\n",
2205                         __func__);
2206                 goto failure3;
2207         }
2208         return 0;
2209 failure3:
2210         dma_async_device_unregister(&base->dma_memcpy);
2211 failure2:
2212         dma_async_device_unregister(&base->dma_slave);
2213 failure1:
2214         return err;
2215 }
2216
2217 /* Initialization functions. */
2218
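/*
 * d40_phy_res_init - initialize the physical channel resources.
 *
 * Channels indicated as secure-only by the PRSME/PRSMO registers are marked
 * as permanently occupied, all others are marked free. Returns the number of
 * physical channels available to this driver.
 */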
2219 static int __init d40_phy_res_init(struct d40_base *base)
2220 {
2221         int i;
2222         int num_phy_chans_avail = 0;
2223         u32 val[2];
2224         int odd_even_bit = -2;
2225
2226         val[0] = readl(base->virtbase + D40_DREG_PRSME);
2227         val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2228
2229         for (i = 0; i < base->num_phy_chans; i++) {
2230                 base->phy_res[i].num = i;
2231                 odd_even_bit += 2 * ((i % 2) == 0);
2232                 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2233                         /* Mark security only channels as occupied */
2234                         base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2235                         base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2236                 } else {
2237                         base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2238                         base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2239                         num_phy_chans_avail++;
2240                 }
2241                 spin_lock_init(&base->phy_res[i].lock);
2242         }
2243         dev_info(base->dev, "%d of %d physical DMA channels available\n",
2244                  num_phy_chans_avail, base->num_phy_chans);
2245
2246         /* Verify settings extended vs standard */
2247         val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2248
2249         for (i = 0; i < base->num_phy_chans; i++) {
2250
2251                 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2252                     (val[0] & 0x3) != 1)
2253                         dev_info(base->dev,
2254                                  "[%s] INFO: channel %d is misconfigured (%d)\n",
2255                                  __func__, i, val[0] & 0x3);
2256
2257                 val[0] = val[0] >> 2;
2258         }
2259
2260         return num_phy_chans_avail;
2261 }
2262
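/*
 * d40_hw_detect_init - detect the DMAC and allocate the driver state.
 *
 * Enables the clock, maps the register area, verifies the peripheral and
 * PCell IDs, reads the number of physical channels from the hardware and
 * allocates struct d40_base including the per-channel data and the lookup
 * tables. Returns NULL on any failure.
 */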
2263 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2264 {
2265         static const struct d40_reg_val dma_id_regs[] = {
2266                 /* Peripheral Id */
2267                 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2268                 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2269                 /*
2270                  * D40_DREG_PERIPHID2 Depends on HW revision:
2271                  *  MOP500/HREF ED has 0x0008,
2272                  *  ? has 0x0018,
2273                  *  HREF V1 has 0x0028
2274                  */
2275                 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2276
2277                 /* PCell Id */
2278                 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2279                 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2280                 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2281                 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2282         };
2283         struct stedma40_platform_data *plat_data;
2284         struct clk *clk = NULL;
2285         void __iomem *virtbase = NULL;
2286         struct resource *res = NULL;
2287         struct d40_base *base = NULL;
2288         int num_log_chans = 0;
2289         int num_phy_chans;
2290         int i;
2291
2292         clk = clk_get(&pdev->dev, NULL);
2293
2294         if (IS_ERR(clk)) {
2295                 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2296                         __func__);
2297                 goto failure;
2298         }
2299
2300         clk_enable(clk);
2301
2302         /* Get IO for DMAC base address */
2303         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2304         if (!res)
2305                 goto failure;
2306
2307         if (request_mem_region(res->start, resource_size(res),
2308                                D40_NAME " I/O base") == NULL)
2309                 goto failure;
2310
2311         virtbase = ioremap(res->start, resource_size(res));
2312         if (!virtbase)
2313                 goto failure;
2314
2315         /* HW version check */
2316         for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2317                 if (dma_id_regs[i].val !=
2318                     readl(virtbase + dma_id_regs[i].reg)) {
2319                         dev_err(&pdev->dev,
2320                                 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2321                                 __func__,
2322                                 dma_id_regs[i].val,
2323                                 dma_id_regs[i].reg,
2324                                 readl(virtbase + dma_id_regs[i].reg));
2325                         goto failure;
2326                 }
2327         }
2328
2329         i = readl(virtbase + D40_DREG_PERIPHID2);
2330
2331         if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2332                 dev_err(&pdev->dev,
2333                         "[%s] Unknown designer! Got %x wanted %x\n",
2334                         __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2335                 goto failure;
2336         }
2337
2338         /* The number of physical channels on this HW */
2339         num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2340
2341         dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2342                  (i >> 4) & 0xf, res->start);
2343
2344         plat_data = pdev->dev.platform_data;
2345
2346         /* Count the number of logical channels in use */
2347         for (i = 0; i < plat_data->dev_len; i++)
2348                 if (plat_data->dev_rx[i] != 0)
2349                         num_log_chans++;
2350
2351         for (i = 0; i < plat_data->dev_len; i++)
2352                 if (plat_data->dev_tx[i] != 0)
2353                         num_log_chans++;
2354
2355         base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2356                        (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2357                        sizeof(struct d40_chan), GFP_KERNEL);
2358
2359         if (base == NULL) {
2360                 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2361                 goto failure;
2362         }
2363
2364         base->clk = clk;
2365         base->num_phy_chans = num_phy_chans;
2366         base->num_log_chans = num_log_chans;
2367         base->phy_start = res->start;
2368         base->phy_size = resource_size(res);
2369         base->virtbase = virtbase;
2370         base->plat_data = plat_data;
2371         base->dev = &pdev->dev;
2372         base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2373         base->log_chans = &base->phy_chans[num_phy_chans];
2374
2375         base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2376                                 GFP_KERNEL);
2377         if (!base->phy_res)
2378                 goto failure;
2379
2380         base->lookup_phy_chans = kzalloc(num_phy_chans *
2381                                          sizeof(struct d40_chan *),
2382                                          GFP_KERNEL);
2383         if (!base->lookup_phy_chans)
2384                 goto failure;
2385
2386         if (num_log_chans + plat_data->memcpy_len) {
2387                 /*
2388                  * The max number of logical channels equals the number of
2389                  * event lines for all src and dst devices.
2390                  */
2391                 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2392                                                  sizeof(struct d40_chan *),
2393                                                  GFP_KERNEL);
2394                 if (!base->lookup_log_chans)
2395                         goto failure;
2396         }
2397         base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2398                                             GFP_KERNEL);
2399         if (!base->lcla_pool.alloc_map)
2400                 goto failure;
2401
2402         base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2403                                             0, SLAB_HWCACHE_ALIGN,
2404                                             NULL);
2405         if (base->desc_slab == NULL)
2406                 goto failure;
2407
2408         return base;
2409
2410 failure:
2411         if (clk) {
2412                 clk_disable(clk);
2413                 clk_put(clk);
2414         }
2415         if (virtbase)
2416                 iounmap(virtbase);
2417         if (res)
2418                 release_mem_region(res->start,
2419                                    resource_size(res));
2422
2423         if (base) {
2424                 kfree(base->lcla_pool.alloc_map);
2425                 kfree(base->lookup_log_chans);
2426                 kfree(base->lookup_phy_chans);
2427                 kfree(base->phy_res);
2428                 kfree(base);
2429         }
2430
2431         return NULL;
2432 }
2433
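/*
 * d40_hw_init - write the static hardware configuration.
 *
 * Enables clocks and logical channel interrupts, then sets every physical
 * channel that is not reserved for secure use to physical mode and enables
 * its interrupt.
 */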
2434 static void __init d40_hw_init(struct d40_base *base)
2435 {
2436
2437         static const struct d40_reg_val dma_init_reg[] = {
2438                 /* Clock every part of the DMA block from start */
2439                 { .reg = D40_DREG_GCC,    .val = 0x0000ff01},
2440
2441                 /* Interrupts on all logical channels */
2442                 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2443                 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2444                 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2445                 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2446                 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2447                 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2448                 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2449                 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2450                 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2451                 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2452                 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2453                 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2454         };
2455         int i;
2456         u32 prmseo[2] = {0, 0};
2457         u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2458         u32 pcmis = 0;
2459         u32 pcicr = 0;
2460
2461         for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2462                 writel(dma_init_reg[i].val,
2463                        base->virtbase + dma_init_reg[i].reg);
2464
2465         /* Configure all our dma channels to default settings */
2466         for (i = 0; i < base->num_phy_chans; i++) {
2467
2468                 activeo[i % 2] = activeo[i % 2] << 2;
2469
2470                 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2471                     == D40_ALLOC_PHY) {
2472                         activeo[i % 2] |= 3;
2473                         continue;
2474                 }
2475
2476                 /* Enable interrupt # */
2477                 pcmis = (pcmis << 1) | 1;
2478
2479                 /* Clear interrupt # */
2480                 pcicr = (pcicr << 1) | 1;
2481
2482                 /* Set channel to physical mode */
2483                 prmseo[i % 2] = prmseo[i % 2] << 2;
2484                 prmseo[i % 2] |= 1;
2485
2486         }
2487
2488         writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2489         writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2490         writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2491         writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2492
2493         /* Write which interrupt to enable */
2494         writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2495
2496         /* Write which interrupt to clear */
2497         writel(pcicr, base->virtbase + D40_DREG_PCICR);
2498
2499 }
2500
2501 static int __init d40_probe(struct platform_device *pdev)
2502 {
2503         int err;
2504         int ret = -ENOENT;
2505         struct d40_base *base;
2506         struct resource *res = NULL;
2507         int num_reserved_chans;
2508         u32 val;
2509
2510         base = d40_hw_detect_init(pdev);
2511
2512         if (!base)
2513                 goto failure;
2514
2515         num_reserved_chans = d40_phy_res_init(base);
2516
2517         platform_set_drvdata(pdev, base);
2518
2519         spin_lock_init(&base->interrupt_lock);
2520         spin_lock_init(&base->execmd_lock);
2521
2522         /* Get IO for logical channel parameter address */
2523         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2524         if (!res) {
2525                 ret = -ENOENT;
2526                 dev_err(&pdev->dev,
2527                         "[%s] No \"lcpa\" memory resource\n",
2528                         __func__);
2529                 goto failure;
2530         }
2531         base->lcpa_size = resource_size(res);
2532         base->phy_lcpa = res->start;
2533
2534         if (request_mem_region(res->start, resource_size(res),
2535                                D40_NAME " I/O lcpa") == NULL) {
2536                 ret = -EBUSY;
2537                 dev_err(&pdev->dev,
2538                         "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2539                         __func__, res->start, res->end);
2540                 goto failure;
2541         }
2542
2543         /* The LCPA is placed in ESRAM; check against what the DMAC has configured. */
2544         val = readl(base->virtbase + D40_DREG_LCPA);
2545         if (res->start != val && val != 0) {
2546                 dev_warn(&pdev->dev,
2547                          "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2548                          __func__, val, res->start);
2549         } else
2550                 writel(res->start, base->virtbase + D40_DREG_LCPA);
2551
2552         base->lcpa_base = ioremap(res->start, resource_size(res));
2553         if (!base->lcpa_base) {
2554                 ret = -ENOMEM;
2555                 dev_err(&pdev->dev,
2556                         "[%s] Failed to ioremap LCPA region\n",
2557                         __func__);
2558                 goto failure;
2559         }
2560         /* Get IO for logical channel link address */
2561         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2562         if (!res) {
2563                 ret = -ENOENT;
2564                 dev_err(&pdev->dev,
2565                         "[%s] No \"lcla\" resource defined\n",
2566                         __func__);
2567                 goto failure;
2568         }
2569
2570         base->lcla_pool.base_size = resource_size(res);
2571         base->lcla_pool.phy = res->start;
2572
2573         if (request_mem_region(res->start, resource_size(res),
2574                                D40_NAME " I/O lcla") == NULL) {
2575                 ret = -EBUSY;
2576                 dev_err(&pdev->dev,
2577                         "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2578                         __func__, res->start, res->end);
2579                 goto failure;
2580         }
2581         val = readl(base->virtbase + D40_DREG_LCLA);
2582         if (res->start != val && val != 0) {
2583                 dev_warn(&pdev->dev,
2584                          "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2585                          __func__, val, res->start);
2586         } else
2587                 writel(res->start, base->virtbase + D40_DREG_LCLA);
2588
2589         base->lcla_pool.base = ioremap(res->start, resource_size(res));
2590         if (!base->lcla_pool.base) {
2591                 ret = -ENOMEM;
2592                 dev_err(&pdev->dev,
2593                         "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2594                         __func__, res->start, res->end);
2595                 goto failure;
2596         }
2597
2598         spin_lock_init(&base->lcla_pool.lock);
2599
2600         base->lcla_pool.num_blocks = base->num_phy_chans;
2601
2602         base->irq = platform_get_irq(pdev, 0);
2603
2604         ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2605
2606         if (ret) {
2607                 dev_err(&pdev->dev, "[%s] Failed to request IRQ\n", __func__);
2608                 goto failure;
2609         }
2610
2611         err = d40_dmaengine_init(base, num_reserved_chans);
2612         if (err)
2613                 goto failure;
2614
2615         d40_hw_init(base);
2616
2617         dev_info(base->dev, "initialized\n");
2618         return 0;
2619
2620 failure:
2621         if (base) {
2622                 if (base->desc_slab)
2623                         kmem_cache_destroy(base->desc_slab);
2624                 if (base->virtbase)
2625                         iounmap(base->virtbase);
2626                 if (base->lcla_pool.phy)
2627                         release_mem_region(base->lcla_pool.phy,
2628                                            base->lcla_pool.base_size);
2629                 if (base->phy_lcpa)
2630                         release_mem_region(base->phy_lcpa,
2631                                            base->lcpa_size);
2632                 if (base->phy_start)
2633                         release_mem_region(base->phy_start,
2634                                            base->phy_size);
2635                 if (base->clk) {
2636                         clk_disable(base->clk);
2637                         clk_put(base->clk);
2638                 }
2639
2640                 kfree(base->lcla_pool.alloc_map);
2641                 kfree(base->lookup_log_chans);
2642                 kfree(base->lookup_phy_chans);
2643                 kfree(base->phy_res);
2644                 kfree(base);
2645         }
2646
2647         dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2648         return ret;
2649 }
2650
2651 static struct platform_driver d40_driver = {
2652         .driver = {
2653                 .owner = THIS_MODULE,
2654                 .name  = D40_NAME,
2655         },
2656 };
2657
2658 int __init stedma40_init(void)
2659 {
2660         return platform_driver_probe(&d40_driver, d40_probe);
2661 }
2662 arch_initcall(stedma40_init);