dmaengine/ste_dma40: allow memory buswidth/burst to be configured
drivers/dma/ste_dma40.c
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/amba/bus.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
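/*
 * Even and odd channels live in separate registers (see for example
 * D40_DREG_ACTIVE/ACTIVO below), so e.g. physical channel 5 gets
 * D40_CHAN_POS(5) == 4 and a mask of 0x30: its 2-bit command/status
 * field occupies bits 4..5 of the odd-channel register.
 */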

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts made before giving up on getting aligned pages */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE          (1 << 31)
#define D40_ALLOC_PHY           (1 << 30)
#define D40_ALLOC_LOG_FREE      0

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP            = 0,
        D40_DMA_RUN             = 1,
        D40_DMA_SUSPEND_REQ     = 2,
        D40_DMA_SUSPENDED       = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to a memory area used when the pre_alloc_lli's are not
 * large enough, i.e. the transfer is bigger than the most common case of
 * 1 dst and 1 src. NULL if pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void    *base;
        int      size;
        dma_addr_t      dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8       pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir         lli_log;

        struct d40_lli_pool              lli_pool;
        int                              lli_len;
        int                              lli_current;
        int                              lcla_alloc;

        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;

        bool                             is_in_client_list;
        bool                             cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. Aligned to a 2^18 byte (256 KiB)
 * boundary (LCLA_ALIGNMENT).
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
        void            *base;
        dma_addr_t      dma_addr;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit map showing which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number.
 */
struct d40_phy_res {
        spinlock_t lock;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical channel number, if any, of this channel.
 * @completed: Starts at 1; after the first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when a transfer is ongoing on this channel.
 * @phy_chan: Pointer to the physical channel which this instance runs on. If
 * this pointer is NULL, the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call the client callback.
 * @client: Client-owned descriptor list.
 * @pending_queue: Submitted jobs, waiting to be moved to the queue.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: Whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t                       lock;
        int                              log_num;
        /* ID of the most recent completed transfer */
        int                              completed;
        int                              pending_tx;
        bool                             busy;
        struct d40_phy_res              *phy_chan;
        struct dma_chan                  chan;
        struct tasklet_struct            tasklet;
        struct list_head                 client;
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
        enum dma_data_direction         runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA controller's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
        spinlock_t                       interrupt_lock;
        spinlock_t                       execmd_lock;
        struct device                    *dev;
        void __iomem                     *virtbase;
        u8                                rev:4;
        struct clk                       *clk;
        phys_addr_t                       phy_start;
        resource_size_t                   phy_size;
        int                               irq;
        int                               num_phy_chans;
        int                               num_log_chans;
        struct dma_device                 dma_both;
        struct dma_device                 dma_slave;
        struct dma_device                 dma_memcpy;
        struct d40_chan                  *phy_chans;
        struct d40_chan                  *log_chans;
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
        void                             *lcpa_base;
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
        return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
        return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
        return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
        return chan->base->virtbase + D40_DREG_PCBASE +
               chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)            \
        dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)          \
        d40_err(chan2dev(d40c), format, ## arg)

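/*
 * Allocate space for the LLIs of one descriptor: the small pre_alloc_lli
 * area covers the common 1 src + 1 dst case, otherwise a correctly
 * aligned buffer is kmalloc'd and, for physical channels, mapped for
 * DMA.
 */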
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
                              int lli_len)
{
        bool is_log = chan_is_logical(d40c);
        u32 align;
        void *base;

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        if (lli_len == 1) {
                base = d40d->lli_pool.pre_alloc_lli;
                d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
                d40d->lli_pool.base = NULL;
        } else {
                d40d->lli_pool.size = lli_len * 2 * align;

                base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
                d40d->lli_pool.base = base;

                if (d40d->lli_pool.base == NULL)
                        return -ENOMEM;
        }

        if (is_log) {
                d40d->lli_log.src = PTR_ALIGN(base, align);
                d40d->lli_log.dst = d40d->lli_log.src + lli_len;

                d40d->lli_pool.dma_addr = 0;
        } else {
                d40d->lli_phy.src = PTR_ALIGN(base, align);
                d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

                d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
                                                         d40d->lli_phy.src,
                                                         d40d->lli_pool.size,
                                                         DMA_TO_DEVICE);

                if (dma_mapping_error(d40c->base->dev,
                                      d40d->lli_pool.dma_addr)) {
                        kfree(d40d->lli_pool.base);
                        d40d->lli_pool.base = NULL;
                        d40d->lli_pool.dma_addr = 0;
                        return -ENOMEM;
                }
        }

        return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_pool.dma_addr)
                dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
                                 d40d->lli_pool.size, DMA_TO_DEVICE);

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
                              struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;
        int p;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

        /*
         * Allocate both src and dst at the same time; therefore each half
         * starts at 1, since 0 can't be used as it is the end marker.
         */
        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                if (!d40c->base->lcla_pool.alloc_map[p + i]) {
                        d40c->base->lcla_pool.alloc_map[p + i] = d40d;
                        d40d->lcla_alloc++;
                        ret = i;
                        break;
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
                             struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        if (chan_is_physical(d40c))
                return 0;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
                                                    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
                                                        D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
                        d40d->lcla_alloc--;
                        if (d40d->lcla_alloc == 0) {
                                ret = 0;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *desc = NULL;

        if (!list_empty(&d40c->client)) {
                struct d40_desc *d;
                struct d40_desc *_d;

                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
                                d40_pool_lli_free(d40c, d);
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
        }

        if (!desc)
                desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

        if (desc)
                INIT_LIST_HEAD(&desc->node);

        return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        d40_pool_lli_free(d40c, d40d);
        d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
        struct d40_phy_lli *lli_src = desc->lli_phy.src;
        void __iomem *base = chan_base(chan);

        writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
        writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
        writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
        writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

        writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
        writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
        writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
        writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

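/*
 * Load a logical-channel job: the first link goes into the channel's
 * LCPA slot, any further links are chained through LCLA entries
 * allocated with d40_lcla_alloc_one().
 */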
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_lcla_pool *pool = &chan->base->lcla_pool;
        struct d40_log_lli_bidir *lli = &desc->lli_log;
        int lli_current = desc->lli_current;
        int lli_len = desc->lli_len;
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
        bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
542
543         /*
544          * For linkback, we need one LCLA even with only one link, because we
545          * can't link back to the one in LCPA space
546          */
547         if (linkback || (lli_len - lli_current > 1)) {
548                 curr_lcla = d40_lcla_alloc_one(chan, desc);
549                 first_lcla = curr_lcla;
550         }
551
552         /*
553          * For linkback, we normally load the LCPA in the loop since we need to
554          * link it to the second LCLA and not the first.  However, if we
555          * couldn't even get a first LCLA, then we have to run in LCPA and
556          * reload manually.
557          */
558         if (!linkback || curr_lcla == -EINVAL) {
559                 unsigned int flags = 0;
560
561                 if (curr_lcla == -EINVAL)
562                         flags |= LLI_TERM_INT;
563
564                 d40_log_lli_lcpa_write(chan->lcpa,
565                                        &lli->dst[lli_current],
566                                        &lli->src[lli_current],
567                                        curr_lcla,
568                                        flags);
569                 lli_current++;
570         }
571
572         if (curr_lcla < 0)
573                 goto out;
574
575         for (; lli_current < lli_len; lli_current++) {
576                 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
577                                            8 * curr_lcla * 2;
578                 struct d40_log_lli *lcla = pool->base + lcla_offset;
579                 unsigned int flags = 0;
580                 int next_lcla;
581
582                 if (lli_current + 1 < lli_len)
583                         next_lcla = d40_lcla_alloc_one(chan, desc);
584                 else
585                         next_lcla = linkback ? first_lcla : -EINVAL;
586
587                 if (cyclic || next_lcla == -EINVAL)
588                         flags |= LLI_TERM_INT;
589
590                 if (linkback && curr_lcla == first_lcla) {
591                         /* First link goes in both LCPA and LCLA */
592                         d40_log_lli_lcpa_write(chan->lcpa,
593                                                &lli->dst[lli_current],
594                                                &lli->src[lli_current],
595                                                next_lcla, flags);
596                 }
597
598                 /*
599                  * One unused LCLA in the cyclic case if the very first
600                  * next_lcla fails...
601                  */
602                 d40_log_lli_lcla_write(lcla,
603                                        &lli->dst[lli_current],
604                                        &lli->src[lli_current],
605                                        next_lcla, flags);
606
607                 dma_sync_single_range_for_device(chan->base->dev,
608                                         pool->dma_addr, lcla_offset,
609                                         2 * sizeof(struct d40_log_lli),
610                                         DMA_TO_DEVICE);
611
612                 curr_lcla = next_lcla;
613
614                 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
615                         lli_current++;
616                         break;
617                 }
618         }
619
620 out:
621         desc->lli_current = lli_current;
622 }

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (chan_is_physical(d40c)) {
                d40_phy_lli_load(d40c, d40d);
                d40d->lli_current = d40d->lli_len;
        } else
                d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->active))
                return NULL;

        d = list_first_entry(&d40c->active,
                             struct d40_desc,
                             node);
        return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->pending_queue))
                return NULL;

        d = list_first_entry(&d40c->pending_queue,
                             struct d40_desc,
                             node);
        return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->queue))
                return NULL;

        d = list_first_entry(&d40c->queue,
                             struct d40_desc,
                             node);
        return d;
}

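/*
 * Map the psize encoding to the burst size in elements: the special
 * STEDMA40_PSIZE_{LOG,PHY}_1 values mean a single element (no burst),
 * any other value v means a burst of 2 << v elements.
 */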
static int d40_psize_2_burst_size(bool is_log, int psize)
{
        if (is_log) {
                if (psize == STEDMA40_PSIZE_LOG_1)
                        return 1;
        } else {
                if (psize == STEDMA40_PSIZE_PHY_1)
                        return 1;
        }

        return 2 << psize;
}

/*
 * The dma hardware only supports transmitting packets of up to
 * STEDMA40_MAX_SEG_SIZE << data_width bytes. Calculate the total number
 * of dma elements required to send the entire sg list.
 */
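/*
 * Example (data_width is the log2 of the access width in bytes): with a
 * 1-byte src and a 4-byte dst, seg_max becomes STEDMA40_MAX_SEG_SIZE
 * rounded down to a multiple of 4 (assuming the constant itself is not
 * 4-byte aligned), and a transfer of 3 * seg_max bytes needs three dma
 * elements.
 */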
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= (1 << max_w);

        if (!IS_ALIGNED(size, 1 << max_w))
                return -EINVAL;

        if (size <= seg_max)
                dmalen = 1;
        else {
                dmalen = size / seg_max;
                if (dmalen * seg_max < size)
                        dmalen++;
        }
        return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
                           u32 data_width1, u32 data_width2)
{
        struct scatterlist *sg;
        int i;
        int len = 0;
        int ret;

        for_each_sg(sgl, sg, sg_len, i) {
                ret = d40_size_2_dmalen(sg_dma_len(sg),
                                        data_width1, data_width2);
                if (ret < 0)
                        return ret;
                len += ret;
        }
        return len;
}

/* Support functions for logical channels */

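/*
 * Write a channel command (RUN/STOP/SUSPEND_REQ) into the 2-bit slot of
 * the shared ACTIVE/ACTIVO register and, for suspend requests, poll
 * until the channel reports SUSPENDED or STOPPED.
 */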
static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
{
        u32 status;
        int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto done;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to suspend the channel %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num,
                                status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
done:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}

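/* Release every descriptor on the active, queued and pending lists. */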
static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release pending descriptors */
        while ((d40d = d40_first_pending(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        d40c->pending_tx = 0;
        d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
                                   u32 event, int reg)
{
        void __iomem *addr = chan_base(d40c) + reg;
        int tries;

        if (!enable) {
                writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);
                return;
        }

        /*
         * The hardware sometimes doesn't register the enable when src and dst
         * event lines are active on the same logical channel.  Retry to ensure
         * it does.  Usually only one retry is sufficient.
         */
        tries = 100;
        while (--tries) {
                writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);

                if (readl(addr) & D40_EVENTLINE_MASK(event))
                        break;
        }

        if (tries != 99)
                dev_dbg(chan2dev(d40c),
                        "[%s] workaround enable S%cLNK (%d tries)\n",
                        __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
                        100 - tries);

        WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
        unsigned long flags;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

                __d40_config_set_event(d40c, do_enable, event,
                                       D40_CHAN_REG_SSLNK);
        }

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

                __d40_config_set_event(d40c, do_enable, event,
                                       D40_CHAN_REG_SDLNK);
        }

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        void __iomem *chanbase = chan_base(d40c);
        u32 val;

        val = readl(chanbase + D40_CHAN_REG_SSLNK);
        val |= readl(chanbase + D40_CHAN_REG_SDLNK);

        return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
        static const unsigned int phy_map[] = {
                [STEDMA40_PCHAN_BASIC_MODE]
                        = D40_DREG_PRMO_PCHAN_BASIC,
                [STEDMA40_PCHAN_MODULO_MODE]
                        = D40_DREG_PRMO_PCHAN_MODULO,
                [STEDMA40_PCHAN_DOUBLE_DST_MODE]
                        = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
        };
        static const unsigned int log_map[] = {
                [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
                [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
                [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
        };

        if (chan_is_physical(d40c))
                return phy_map[d40c->dma_cfg.mode_opt];
        else
                return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
        u32 addr_base;
        u32 var;

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(chan_is_logical(d40c)) + 1) <<
                D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (chan_is_logical(d40c)) {
                int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
                           & D40_SREG_ELEM_LOG_LIDX_MASK;
                void __iomem *chanbase = chan_base(d40c);

                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
                writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);
        }
}

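/*
 * Return the number of bytes left to transfer on the channel: the
 * remaining element count from the link registers multiplied by the
 * destination element width.
 */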
static u32 d40_residue(struct d40_chan *d40c)
{
        u32 num_elt;

        if (chan_is_logical(d40c))
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                        >> D40_MEM_LCSP2_ECNT_POS;
        else {
                u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
                num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
                          >> D40_SREG_ELEM_PHY_ECNT_POS;
        }

        return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
        bool is_link;

        if (chan_is_logical(d40c))
                is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
        else
                is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
                          & D40_SREG_LNK_PHYS_LNK_MASK;

        return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
        int res = 0;
        unsigned long flags;

        if (!d40c->busy)
                return 0;

        spin_lock_irqsave(&d40c->lock, flags);

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res == 0) {
                if (chan_is_logical(d40c)) {
                        d40_config_set_event(d40c, false);
                        /* Resume the other logical channels if any */
                        if (d40_chan_has_events(d40c))
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
                }
        }

        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}

static int d40_resume(struct d40_chan *d40c)
{
        int res = 0;
        unsigned long flags;

        if (!d40c->busy)
                return 0;

        spin_lock_irqsave(&d40c->lock, flags);

        if (d40c->base->rev == 0)
                if (chan_is_logical(d40c)) {
                        res = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        goto no_suspend;
                }

        /* If there are bytes left to transfer, or a linked tx, resume the job */
        if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

                if (chan_is_logical(d40c))
                        d40_config_set_event(d40c, true);

                res = d40_channel_execute_command(d40c, D40_DMA_RUN);
        }

no_suspend:
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}

static int d40_terminate_all(struct d40_chan *chan)
{
        unsigned long flags;
        int ret = 0;

        ret = d40_pause(chan);
        if (!ret && chan_is_physical(chan))
                ret = d40_channel_execute_command(chan, D40_DMA_STOP);

        spin_lock_irqsave(&chan->lock, flags);
        d40_term_all(chan);
        spin_unlock_irqrestore(&chan->lock, flags);

        return ret;
}

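/*
 * Assign the next cookie to the descriptor and put it on the pending
 * queue; the job is not started here, only handed over to the channel.
 */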
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct d40_chan *d40c = container_of(tx->chan,
                                             struct d40_chan,
                                             chan);
        struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        d40c->chan.cookie++;

        if (d40c->chan.cookie < 0)
                d40c->chan.cookie = 1;

        d40d->txd.cookie = d40c->chan.cookie;

        d40_desc_queue(d40c, d40d);

        spin_unlock_irqrestore(&d40c->lock, flags);

        return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
        if (d40c->base->rev == 0) {
                int err;

                if (chan_is_logical(d40c)) {
                        err = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        if (err)
                                return err;
                }
        }

        if (chan_is_logical(d40c))
                d40_config_set_event(d40c, true);

        return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        int err;

        /* Start queued jobs, if any */
        d40d = d40_first_queued(d40c);

        if (d40d != NULL) {
                d40c->busy = true;

                /* Remove from queue */
                d40_desc_remove(d40d);

                /* Add to active queue */
                d40_desc_submit(d40c, d40d);

                /* Initiate DMA job */
                d40_desc_load(d40c, d40d);

                /* Start dma job */
                err = d40_start(d40c);

                if (err)
                        return NULL;
        }

        return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
        struct d40_desc *d40d;

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);

        if (d40d == NULL)
                return;

        if (d40d->cyclic) {
                /*
                 * If this was a partially loaded list, we need to reload
                 * it, and only when the list is completed.  We need to check
                 * for done because the interrupt will hit for every link, and
                 * not just the last one.
                 */
                if (d40d->lli_current < d40d->lli_len
                    && !d40_tx_is_linked(d40c)
                    && !d40_residue(d40c)) {
                        d40_lcla_free_all(d40c, d40d);
                        d40_desc_load(d40c, d40d);
                        (void) d40_start(d40c);

                        if (d40d->lli_current == d40d->lli_len)
                                d40d->lli_current = 0;
                }
        } else {
                d40_lcla_free_all(d40c, d40d);

                if (d40d->lli_current < d40d->lli_len) {
                        d40_desc_load(d40c, d40d);
                        /* Start dma job */
                        (void) d40_start(d40c);
                        return;
                }

                if (d40_queue_start(d40c) == NULL)
                        d40c->busy = false;
        }

        d40c->pending_tx++;
        tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
        struct d40_chan *d40c = (struct d40_chan *) data;
        struct d40_desc *d40d;
        unsigned long flags;
        dma_async_tx_callback callback;
        void *callback_param;

        spin_lock_irqsave(&d40c->lock, flags);

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);
        if (d40d == NULL)
                goto err;

        if (!d40d->cyclic)
                d40c->completed = d40d->txd.cookie;

        /*
         * If terminating a channel, pending_tx is set to zero.
         * This prevents any finished active jobs from being returned to the
         * client.
         */
        if (d40c->pending_tx == 0) {
                spin_unlock_irqrestore(&d40c->lock, flags);
                return;
        }

        /* Callback to client */
        callback = d40d->txd.callback;
        callback_param = d40d->txd.callback_param;

        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
                        d40_pool_lli_free(d40c, d40d);
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
                        if (!d40d->is_in_client_list) {
                                d40_desc_remove(d40d);
                                d40_lcla_free_all(d40c, d40d);
                                list_add_tail(&d40d->node, &d40c->client);
                                d40d->is_in_client_list = true;
                        }
                }
        }

        d40c->pending_tx--;

        if (d40c->pending_tx)
                tasklet_schedule(&d40c->tasklet);

        spin_unlock_irqrestore(&d40c->lock, flags);

        if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
                callback(callback_param);

        return;

 err:
        /* Rescue manoeuvre if receiving double interrupts */
        if (d40c->pending_tx > 0)
                d40c->pending_tx--;
        spin_unlock_irqrestore(&d40c->lock, flags);
}

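/*
 * The ten status registers are read into one array and scanned as a
 * single bitmap: bit index / BITS_PER_LONG selects the row (and thus
 * the clear register), the remainder selects the channel bit.
 */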
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
        static const struct d40_interrupt_lookup il[] = {
                {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
                {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
                {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
                {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
                {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
                {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
                {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
                {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
                {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
                {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
        };

        int i;
        u32 regs[ARRAY_SIZE(il)];
        u32 idx;
        u32 row;
        long chan = -1;
        struct d40_chan *d40c;
        unsigned long flags;
        struct d40_base *base = data;

        spin_lock_irqsave(&base->interrupt_lock, flags);

        /* Read interrupt status of both logical and physical channels */
        for (i = 0; i < ARRAY_SIZE(il); i++)
                regs[i] = readl(base->virtbase + il[i].src);

        for (;;) {

                chan = find_next_bit((unsigned long *)regs,
                                     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

                /* No more set bits found? */
                if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
                        break;

                row = chan / BITS_PER_LONG;
                idx = chan & (BITS_PER_LONG - 1);

                /* ACK interrupt */
                writel(1 << idx, base->virtbase + il[row].clr);

                if (il[row].offset == D40_PHY_CHAN)
                        d40c = base->lookup_phy_chans[idx];
                else
                        d40c = base->lookup_log_chans[il[row].offset + idx];
                spin_lock(&d40c->lock);

                if (!il[row].is_error)
                        dma_tc_handle(d40c);
                else
                        d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
                                chan, il[row].offset, idx);

                spin_unlock(&d40c->lock);
        }

        spin_unlock_irqrestore(&base->interrupt_lock, flags);

        return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)
{
        int res = 0;
        u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
        u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
        bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

        if (!conf->dir) {
                chan_err(d40c, "Invalid direction.\n");
                res = -EINVAL;
        }

        if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
            d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
            d40c->runtime_addr == 0) {

                chan_err(d40c, "Invalid TX channel address (%d)\n",
                         conf->dst_dev_type);
                res = -EINVAL;
        }

        if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
            d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
            d40c->runtime_addr == 0) {
                chan_err(d40c, "Invalid RX channel address (%d)\n",
                        conf->src_dev_type);
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY) {
                chan_err(d40c, "Invalid dst\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
            src_event_group == STEDMA40_DEV_SRC_MEMORY) {
                chan_err(d40c, "Invalid src\n");
                res = -EINVAL;
        }

        if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
                chan_err(d40c, "No event line\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
            (src_event_group != dst_event_group)) {
                chan_err(d40c, "Invalid event group\n");
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
                /*
                 * The DMAC hardware supports it, so support may be added to
                 * this driver in case any dma client requires it.
                 */
                chan_err(d40c, "periph to periph not supported\n");
                res = -EINVAL;
        }

        if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
            (1 << conf->src_info.data_width) !=
            d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
            (1 << conf->dst_info.data_width)) {
                /*
                 * The DMAC hardware only supports
                 * src (burst x width) == dst (burst x width)
                 */

                chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
                res = -EINVAL;
        }

        return res;
}

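/*
 * Try to reserve (part of) a physical channel: a physical-mode user
 * takes the whole channel (D40_ALLOC_PHY), while logical users set one
 * bit per event line in allocated_src/allocated_dst.
 */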
1386 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1387                                int log_event_line, bool is_log)
1388 {
1389         unsigned long flags;
1390         spin_lock_irqsave(&phy->lock, flags);
1391         if (!is_log) {
1392                 /* Physical interrupts are masked per physical full channel */
1393                 if (phy->allocated_src == D40_ALLOC_FREE &&
1394                     phy->allocated_dst == D40_ALLOC_FREE) {
1395                         phy->allocated_dst = D40_ALLOC_PHY;
1396                         phy->allocated_src = D40_ALLOC_PHY;
1397                         goto found;
1398                 } else
1399                         goto not_found;
1400         }
1401
1402         /* Logical channel */
1403         if (is_src) {
1404                 if (phy->allocated_src == D40_ALLOC_PHY)
1405                         goto not_found;
1406
1407                 if (phy->allocated_src == D40_ALLOC_FREE)
1408                         phy->allocated_src = D40_ALLOC_LOG_FREE;
1409
1410                 if (!(phy->allocated_src & (1 << log_event_line))) {
1411                         phy->allocated_src |= 1 << log_event_line;
1412                         goto found;
1413                 } else
1414                         goto not_found;
1415         } else {
1416                 if (phy->allocated_dst == D40_ALLOC_PHY)
1417                         goto not_found;
1418
1419                 if (phy->allocated_dst == D40_ALLOC_FREE)
1420                         phy->allocated_dst = D40_ALLOC_LOG_FREE;
1421
1422                 if (!(phy->allocated_dst & (1 << log_event_line))) {
1423                         phy->allocated_dst |= 1 << log_event_line;
1424                         goto found;
1425                 } else
1426                         goto not_found;
1427         }
1428
1429 not_found:
1430         spin_unlock_irqrestore(&phy->lock, flags);
1431         return false;
1432 found:
1433         spin_unlock_irqrestore(&phy->lock, flags);
1434         return true;
1435 }
1436
1437 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1438                                int log_event_line)
1439 {
1440         unsigned long flags;
1441         bool is_free = false;
1442
1443         spin_lock_irqsave(&phy->lock, flags);
1444         if (!log_event_line) {
1445                 phy->allocated_dst = D40_ALLOC_FREE;
1446                 phy->allocated_src = D40_ALLOC_FREE;
1447                 is_free = true;
1448                 goto out;
1449         }
1450
1451         /* Logical channel */
1452         if (is_src) {
1453                 phy->allocated_src &= ~(1 << log_event_line);
1454                 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1455                         phy->allocated_src = D40_ALLOC_FREE;
1456         } else {
1457                 phy->allocated_dst &= ~(1 << log_event_line);
1458                 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1459                         phy->allocated_dst = D40_ALLOC_FREE;
1460         }
1461
1462         is_free = ((phy->allocated_src | phy->allocated_dst) ==
1463                    D40_ALLOC_FREE);
1464
1465 out:
1466         spin_unlock_irqrestore(&phy->lock, flags);
1467
1468         return is_free;
1469 }
1470
1471 static int d40_allocate_channel(struct d40_chan *d40c)
1472 {
1473         int dev_type;
1474         int event_group;
1475         int event_line;
1476         struct d40_phy_res *phys;
1477         int i;
1478         int j;
1479         int log_num;
1480         bool is_src;
1481         bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1482
1483         phys = d40c->base->phy_res;
1484
1485         if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1486                 dev_type = d40c->dma_cfg.src_dev_type;
1487                 log_num = 2 * dev_type;
1488                 is_src = true;
1489         } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1490                    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1491                 /* dst event lines are used for logical memcpy */
1492                 dev_type = d40c->dma_cfg.dst_dev_type;
1493                 log_num = 2 * dev_type + 1;
1494                 is_src = false;
1495         } else
1496                 return -EINVAL;
1497
1498         event_group = D40_TYPE_TO_GROUP(dev_type);
1499         event_line = D40_TYPE_TO_EVENT(dev_type);
1500
1501         if (!is_log) {
1502                 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1503                         /* Find physical half channel */
1504                         for (i = 0; i < d40c->base->num_phy_chans; i++) {
1505
1506                                 if (d40_alloc_mask_set(&phys[i], is_src,
1507                                                        0, is_log))
1508                                         goto found_phy;
1509                         }
1510                 } else
1511                         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1512                 int phy_num = j + event_group * 2;
1513                                 for (i = phy_num; i < phy_num + 2; i++) {
1514                                         if (d40_alloc_mask_set(&phys[i],
1515                                                                is_src,
1516                                                                0,
1517                                                                is_log))
1518                                                 goto found_phy;
1519                                 }
1520                         }
1521                 return -EINVAL;
1522 found_phy:
1523                 d40c->phy_chan = &phys[i];
1524                 d40c->log_num = D40_PHY_CHAN;
1525                 goto out;
1526         }
1527         if (dev_type == -1)
1528                 return -EINVAL;
1529
1530         /* Find logical channel */
1531         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1532                 int phy_num = j + event_group * 2;
1533                 /*
1534                  * Spread logical channels across all available physical
1535                  * channels rather than packing every logical channel onto
1536                  * the first available one.
1537                  */
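                /*
                 * Worked example (illustrative): with num_phy_chans == 16
                 * and event_group == 1, the candidate channels are 2/3 and
                 * 10/11; a source allocation probes 2, 3, 10, 11 in that
                 * order while a destination allocation probes 3, 2, 11, 10.
                 */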
1538                 if (is_src) {
1539                         for (i = phy_num; i < phy_num + 2; i++) {
1540                                 if (d40_alloc_mask_set(&phys[i], is_src,
1541                                                        event_line, is_log))
1542                                         goto found_log;
1543                         }
1544                 } else {
1545                         for (i = phy_num + 1; i >= phy_num; i--) {
1546                                 if (d40_alloc_mask_set(&phys[i], is_src,
1547                                                        event_line, is_log))
1548                                         goto found_log;
1549                         }
1550                 }
1551         }
1552         return -EINVAL;
1553
1554 found_log:
1555         d40c->phy_chan = &phys[i];
1556         d40c->log_num = log_num;
1557 out:
1558
1559         if (is_log)
1560                 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1561         else
1562                 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1563
1564         return 0;
1565
1566 }
1567
1568 static int d40_config_memcpy(struct d40_chan *d40c)
1569 {
1570         dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1571
1572         if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1573                 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1574                 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1575                 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1576                         memcpy[d40c->chan.chan_id];
1577
1578         } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1579                    dma_has_cap(DMA_SLAVE, cap)) {
1580                 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1581         } else {
1582                 chan_err(d40c, "No memcpy\n");
1583                 return -EINVAL;
1584         }
1585
1586         return 0;
1587 }
1588
1589
1590 static int d40_free_dma(struct d40_chan *d40c)
1591 {
1592
1593         int res = 0;
1594         u32 event;
1595         struct d40_phy_res *phy = d40c->phy_chan;
1596         bool is_src;
1597         struct d40_desc *d;
1598         struct d40_desc *_d;
1599
1600
1601         /* Terminate all queued and active transfers */
1602         d40_term_all(d40c);
1603
1604         /* Release client owned descriptors */
1605         if (!list_empty(&d40c->client))
1606                 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1607                         d40_pool_lli_free(d40c, d);
1608                         d40_desc_remove(d);
1609                         d40_desc_free(d40c, d);
1610                 }
1611
1612         if (phy == NULL) {
1613                 chan_err(d40c, "phy == null\n");
1614                 return -EINVAL;
1615         }
1616
1617         if (phy->allocated_src == D40_ALLOC_FREE &&
1618             phy->allocated_dst == D40_ALLOC_FREE) {
1619                 chan_err(d40c, "channel already free\n");
1620                 return -EINVAL;
1621         }
1622
1623         if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1624             d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1625                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1626                 is_src = false;
1627         } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1628                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1629                 is_src = true;
1630         } else {
1631                 chan_err(d40c, "Unknown direction\n");
1632                 return -EINVAL;
1633         }
1634
1635         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1636         if (res) {
1637                 chan_err(d40c, "suspend failed\n");
1638                 return res;
1639         }
1640
1641         if (chan_is_logical(d40c)) {
1642                 /* Release logical channel, deactivate the event line */
1643
1644                 d40_config_set_event(d40c, false);
1645                 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1646
1647                 /*
1648                  * Check if there are more logical allocations
1649                  * on this phy channel.
1650                  */
1651                 if (!d40_alloc_mask_free(phy, is_src, event)) {
1652                         /* Resume the other logical channels if any */
1653                         if (d40_chan_has_events(d40c)) {
1654                                 res = d40_channel_execute_command(d40c,
1655                                                                   D40_DMA_RUN);
1656                                 if (res) {
1657                                         chan_err(d40c,
1658                                                 "Failed to execute RUN command\n");
1659                                         return res;
1660                                 }
1661                         }
1662                         return 0;
1663                 }
1664         } else {
1665                 (void) d40_alloc_mask_free(phy, is_src, 0);
1666         }
1667
1668         /* Release physical channel */
1669         res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1670         if (res) {
1671                 chan_err(d40c, "Failed to stop channel\n");
1672                 return res;
1673         }
1674         d40c->phy_chan = NULL;
1675         d40c->configured = false;
1676         d40c->base->lookup_phy_chans[phy->num] = NULL;
1677
1678         return 0;
1679 }
1680
1681 static bool d40_is_paused(struct d40_chan *d40c)
1682 {
1683         void __iomem *chanbase = chan_base(d40c);
1684         bool is_paused = false;
1685         unsigned long flags;
1686         void __iomem *active_reg;
1687         u32 status;
1688         u32 event;
1689
1690         spin_lock_irqsave(&d40c->lock, flags);
1691
1692         if (chan_is_physical(d40c)) {
1693                 if (d40c->phy_chan->num % 2 == 0)
1694                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1695                 else
1696                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1697
1698                 status = (readl(active_reg) &
1699                           D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1700                         D40_CHAN_POS(d40c->phy_chan->num);
1701                 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1702                         is_paused = true;
1703
1704                 goto _exit;
1705         }
1706
1707         if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1708             d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1709                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1710                 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1711         } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1712                 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1713                 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1714         } else {
1715                 chan_err(d40c, "Unknown direction\n");
1716                 goto _exit;
1717         }
1718
1719         status = (status & D40_EVENTLINE_MASK(event)) >>
1720                 D40_EVENTLINE_POS(event);
1721
1722         if (status != D40_DMA_RUN)
1723                 is_paused = true;
1724 _exit:
1725         spin_unlock_irqrestore(&d40c->lock, flags);
1726         return is_paused;
1727
1728 }
1729
1730
1731 static u32 stedma40_residue(struct dma_chan *chan)
1732 {
1733         struct d40_chan *d40c =
1734                 container_of(chan, struct d40_chan, chan);
1735         u32 bytes_left;
1736         unsigned long flags;
1737
1738         spin_lock_irqsave(&d40c->lock, flags);
1739         bytes_left = d40_residue(d40c);
1740         spin_unlock_irqrestore(&d40c->lock, flags);
1741
1742         return bytes_left;
1743 }
1744
1745 static int
1746 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
1747                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1748                 unsigned int sg_len, dma_addr_t src_dev_addr,
1749                 dma_addr_t dst_dev_addr)
1750 {
1751         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1752         struct stedma40_half_channel_info *src_info = &cfg->src_info;
1753         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1754         int ret;
1755
1756         ret = d40_log_sg_to_lli(sg_src, sg_len,
1757                                 src_dev_addr,
1758                                 desc->lli_log.src,
1759                                 chan->log_def.lcsp1,
1760                                 src_info->data_width,
1761                                 dst_info->data_width);
        /* Do not silently drop a failed src conversion */
        if (ret < 0)
                return ret;
1762
1763         ret = d40_log_sg_to_lli(sg_dst, sg_len,
1764                                 dst_dev_addr,
1765                                 desc->lli_log.dst,
1766                                 chan->log_def.lcsp3,
1767                                 dst_info->data_width,
1768                                 src_info->data_width);
1769
1770         return ret < 0 ? ret : 0;
1771 }
1772
1773 static int
1774 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
1775                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1776                 unsigned int sg_len, dma_addr_t src_dev_addr,
1777                 dma_addr_t dst_dev_addr)
1778 {
1779         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1780         struct stedma40_half_channel_info *src_info = &cfg->src_info;
1781         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1782         unsigned long flags = 0;
1783         int ret;
1784
1785         if (desc->cyclic)
1786                 flags |= LLI_CYCLIC | LLI_TERM_INT;
1787
1788         ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
1789                                 desc->lli_phy.src,
1790                                 virt_to_phys(desc->lli_phy.src),
1791                                 chan->src_def_cfg,
1792                                 src_info, dst_info, flags);
        /* Do not silently drop a failed src conversion */
        if (ret < 0)
                return ret;
1793
1794         ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
1795                                 desc->lli_phy.dst,
1796                                 virt_to_phys(desc->lli_phy.dst),
1797                                 chan->dst_def_cfg,
1798                                 dst_info, src_info, flags);
1799
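        /*
         * Flush the CPU-written LLIs out to memory before the DMAC
         * reads them.
         */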
1800         dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
1801                                    desc->lli_pool.size, DMA_TO_DEVICE);
1802
1803         return ret < 0 ? ret : 0;
1804 }
1805
1806
1807 static struct d40_desc *
1808 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
1809               unsigned int sg_len, unsigned long dma_flags)
1810 {
1811         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1812         struct d40_desc *desc;
1813         int ret;
1814
1815         desc = d40_desc_get(chan);
1816         if (!desc)
1817                 return NULL;
1818
1819         desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
1820                                         cfg->dst_info.data_width);
1821         if (desc->lli_len < 0) {
1822                 chan_err(chan, "Unaligned size\n");
1823                 goto err;
1824         }
1825
1826         ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
1827         if (ret < 0) {
1828                 chan_err(chan, "Could not allocate lli\n");
1829                 goto err;
1830         }
1831
1832
1833         desc->lli_current = 0;
1834         desc->txd.flags = dma_flags;
1835         desc->txd.tx_submit = d40_tx_submit;
1836
1837         dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
1838
1839         return desc;
1840
1841 err:
1842         d40_desc_free(chan, desc);
1843         return NULL;
1844 }
1845
1846 static dma_addr_t
1847 d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1848 {
1849         struct stedma40_platform_data *plat = chan->base->plat_data;
1850         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1851         dma_addr_t addr = 0;
1852
1853         if (chan->runtime_addr)
1854                 return chan->runtime_addr;
1855
1856         if (direction == DMA_FROM_DEVICE)
1857                 addr = plat->dev_rx[cfg->src_dev_type];
1858         else if (direction == DMA_TO_DEVICE)
1859                 addr = plat->dev_tx[cfg->dst_dev_type];
1860
1861         return addr;
1862 }
1863
1864 static struct dma_async_tx_descriptor *
1865 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1866             struct scatterlist *sg_dst, unsigned int sg_len,
1867             enum dma_data_direction direction, unsigned long dma_flags)
1868 {
1869         struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1870         dma_addr_t src_dev_addr = 0;
1871         dma_addr_t dst_dev_addr = 0;
1872         struct d40_desc *desc;
1873         unsigned long flags;
1874         int ret;
1875
1876         if (!chan->phy_chan) {
1877                 chan_err(chan, "Cannot prepare unallocated channel\n");
1878                 return NULL;
1879         }
1880
1881
1882         spin_lock_irqsave(&chan->lock, flags);
1883
1884         desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
1885         if (desc == NULL)
1886                 goto err;
1887
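        /*
         * A scatterlist whose tail links back to its own head (as built
         * by dma40_prep_dma_cyclic() below) marks the job as cyclic.
         */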
1888         if (sg_next(&sg_src[sg_len - 1]) == sg_src)
1889                 desc->cyclic = true;
1890
1891         if (direction != DMA_NONE) {
1892                 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1893
1894                 if (direction == DMA_FROM_DEVICE)
1895                         src_dev_addr = dev_addr;
1896                 else if (direction == DMA_TO_DEVICE)
1897                         dst_dev_addr = dev_addr;
1898         }
1899
1900         if (chan_is_logical(chan))
1901                 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
1902                                       sg_len, src_dev_addr, dst_dev_addr);
1903         else
1904                 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
1905                                       sg_len, src_dev_addr, dst_dev_addr);
1906
1907         if (ret) {
1908                 chan_err(chan, "Failed to prepare %s sg job: %d\n",
1909                          chan_is_logical(chan) ? "log" : "phy", ret);
1910                 goto err;
1911         }
1912
1913         spin_unlock_irqrestore(&chan->lock, flags);
1914
1915         return &desc->txd;
1916
1917 err:
1918         if (desc)
1919                 d40_desc_free(chan, desc);
1920         spin_unlock_irqrestore(&chan->lock, flags);
1921         return NULL;
1922 }
1923
1924 bool stedma40_filter(struct dma_chan *chan, void *data)
1925 {
1926         struct stedma40_chan_cfg *info = data;
1927         struct d40_chan *d40c =
1928                 container_of(chan, struct d40_chan, chan);
1929         int err;
1930
1931         if (data) {
1932                 err = d40_validate_conf(d40c, info);
1933                 if (!err)
1934                         d40c->dma_cfg = *info;
1935         } else
1936                 err = d40_config_memcpy(d40c);
1937
1938         if (!err)
1939                 d40c->configured = true;
1940
1941         return err == 0;
1942 }
1943 EXPORT_SYMBOL(stedma40_filter);
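
/*
 * Example client usage of stedma40_filter() (a sketch, not compiled as
 * part of this driver; MY_DEV_TYPE stands in for a real device type):
 *
 *      struct stedma40_chan_cfg cfg = {
 *              .dir = STEDMA40_PERIPH_TO_MEM,
 *              .src_dev_type = MY_DEV_TYPE,
 *              .mode = STEDMA40_MODE_LOGICAL,
 *      };
 *      struct dma_chan *chan;
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL as the filter parameter instead falls back to the
 * platform memcpy configuration via d40_config_memcpy().
 */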
1944
1945 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1946 {
1947         bool realtime = d40c->dma_cfg.realtime;
1948         bool highprio = d40c->dma_cfg.high_priority;
1949         u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1950         u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1951         u32 event = D40_TYPE_TO_EVENT(dev_type);
1952         u32 group = D40_TYPE_TO_GROUP(dev_type);
1953         u32 bit = 1 << event;
1954
1955         /* Destination event lines are stored in the upper halfword */
1956         if (!src)
1957                 bit <<= 16;
1958
1959         writel(bit, d40c->base->virtbase + prioreg + group * 4);
1960         writel(bit, d40c->base->virtbase + rtreg + group * 4);
1961 }
1962
1963 static void d40_set_prio_realtime(struct d40_chan *d40c)
1964 {
1965         if (d40c->base->rev < 3)
1966                 return;
1967
1968         if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
1969             (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1970                 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1971
1972         if ((d40c->dma_cfg.dir ==  STEDMA40_MEM_TO_PERIPH) ||
1973             (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1974                 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1975 }
1976
1977 /* DMA ENGINE functions */
1978 static int d40_alloc_chan_resources(struct dma_chan *chan)
1979 {
1980         int err;
1981         unsigned long flags;
1982         struct d40_chan *d40c =
1983                 container_of(chan, struct d40_chan, chan);
1984         bool is_free_phy;
1985         spin_lock_irqsave(&d40c->lock, flags);
1986
1987         d40c->completed = chan->cookie = 1;
1988
1989         /* If no dma configuration is set use default configuration (memcpy) */
1990         if (!d40c->configured) {
1991                 err = d40_config_memcpy(d40c);
1992                 if (err) {
1993                         chan_err(d40c, "Failed to configure memcpy channel\n");
1994                         goto fail;
1995                 }
1996         }
1997         is_free_phy = (d40c->phy_chan == NULL);
1998
1999         err = d40_allocate_channel(d40c);
2000         if (err) {
2001                 chan_err(d40c, "Failed to allocate channel\n");
2002                 goto fail;
2003         }
2004
2005         /* Fill in basic CFG register values */
2006         d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2007                     &d40c->dst_def_cfg, chan_is_logical(d40c));
2008
2009         d40_set_prio_realtime(d40c);
2010
2011         if (chan_is_logical(d40c)) {
2012                 d40_log_cfg(&d40c->dma_cfg,
2013                             &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2014
2015                 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2016                         d40c->lcpa = d40c->base->lcpa_base +
2017                           d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
2018                 else
2019                         d40c->lcpa = d40c->base->lcpa_base +
2020                           d40c->dma_cfg.dst_dev_type *
2021                           D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2022         }
2023
2024         /*
2025          * Only write channel configuration to the DMA if the physical
2026          * resource is free. In case of multiple logical channels
2027          * on the same physical resource, only the first write is necessary.
2028          */
2029         if (is_free_phy)
2030                 d40_config_write(d40c);
2031 fail:
2032         spin_unlock_irqrestore(&d40c->lock, flags);
2033         return err;
2034 }
2035
2036 static void d40_free_chan_resources(struct dma_chan *chan)
2037 {
2038         struct d40_chan *d40c =
2039                 container_of(chan, struct d40_chan, chan);
2040         int err;
2041         unsigned long flags;
2042
2043         if (d40c->phy_chan == NULL) {
2044                 chan_err(d40c, "Cannot free unallocated channel\n");
2045                 return;
2046         }
2047
2048
2049         spin_lock_irqsave(&d40c->lock, flags);
2050
2051         err = d40_free_dma(d40c);
2052
2053         if (err)
2054                 chan_err(d40c, "Failed to free channel\n");
2055         spin_unlock_irqrestore(&d40c->lock, flags);
2056 }
2057
2058 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2059                                                        dma_addr_t dst,
2060                                                        dma_addr_t src,
2061                                                        size_t size,
2062                                                        unsigned long dma_flags)
2063 {
2064         struct scatterlist dst_sg;
2065         struct scatterlist src_sg;
2066
2067         sg_init_table(&dst_sg, 1);
2068         sg_init_table(&src_sg, 1);
2069
2070         sg_dma_address(&dst_sg) = dst;
2071         sg_dma_address(&src_sg) = src;
2072
2073         sg_dma_len(&dst_sg) = size;
2074         sg_dma_len(&src_sg) = size;
2075
2076         return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2077 }
2078
2079 static struct dma_async_tx_descriptor *
2080 d40_prep_memcpy_sg(struct dma_chan *chan,
2081                    struct scatterlist *dst_sg, unsigned int dst_nents,
2082                    struct scatterlist *src_sg, unsigned int src_nents,
2083                    unsigned long dma_flags)
2084 {
2085         if (dst_nents != src_nents)
2086                 return NULL;
2087
2088         return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2089 }
2090
2091 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2092                                                          struct scatterlist *sgl,
2093                                                          unsigned int sg_len,
2094                                                          enum dma_data_direction direction,
2095                                                          unsigned long dma_flags)
2096 {
2097         if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
2098                 return NULL;
2099
2100         return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2101 }
2102
2103 static struct dma_async_tx_descriptor *
2104 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2105                      size_t buf_len, size_t period_len,
2106                      enum dma_data_direction direction)
2107 {
2108         unsigned int periods = buf_len / period_len;
2109         struct dma_async_tx_descriptor *txd;
2110         struct scatterlist *sg;
2111         int i;
2112
2113         sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
        if (!sg)
                return NULL;
2114         for (i = 0; i < periods; i++) {
2115                 sg_dma_address(&sg[i]) = dma_addr;
2116                 sg_dma_len(&sg[i]) = period_len;
2117                 dma_addr += period_len;
2118         }
2119
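        /*
         * Terminate the list by hand: bit 0 in page_link marks a chain
         * entry and bit 1 (the end-of-list marker) is cleared, so the
         * extra entry links back to the head and the scatterlist becomes
         * circular; d40_prep_sg() detects this and flags the descriptor
         * as cyclic.
         */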
2120         sg[periods].offset = 0;
2121         sg[periods].length = 0;
2122         sg[periods].page_link =
2123                 ((unsigned long)sg | 0x01) & ~0x02;
2124
2125         txd = d40_prep_sg(chan, sg, sg, periods, direction,
2126                           DMA_PREP_INTERRUPT);
2127
2128         kfree(sg);
2129
2130         return txd;
2131 }
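
/*
 * Example (illustrative): a client driving a 4 KiB ring buffer split
 * into four 1 KiB periods would, through the dmaengine core, end up
 * calling something like
 *
 *      txd = chan->device->device_prep_dma_cyclic(chan, buf_dma, SZ_4K,
 *                                                 SZ_1K, DMA_TO_DEVICE);
 *
 * and, since DMA_PREP_INTERRUPT is set above, get a callback once per
 * period as the transfer wraps around the buffer.
 */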
2132
2133 static enum dma_status d40_tx_status(struct dma_chan *chan,
2134                                      dma_cookie_t cookie,
2135                                      struct dma_tx_state *txstate)
2136 {
2137         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2138         dma_cookie_t last_used;
2139         dma_cookie_t last_complete;
2140         int ret;
2141
2142         if (d40c->phy_chan == NULL) {
2143                 chan_err(d40c, "Cannot read status of unallocated channel\n");
2144                 return -EINVAL;
2145         }
2146
2147         last_complete = d40c->completed;
2148         last_used = chan->cookie;
2149
2150         if (d40_is_paused(d40c))
2151                 ret = DMA_PAUSED;
2152         else
2153                 ret = dma_async_is_complete(cookie, last_complete, last_used);
2154
2155         dma_set_tx_state(txstate, last_complete, last_used,
2156                          stedma40_residue(chan));
2157
2158         return ret;
2159 }
2160
2161 static void d40_issue_pending(struct dma_chan *chan)
2162 {
2163         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2164         unsigned long flags;
2165
2166         if (d40c->phy_chan == NULL) {
2167                 chan_err(d40c, "Channel is not allocated!\n");
2168                 return;
2169         }
2170
2171         spin_lock_irqsave(&d40c->lock, flags);
2172
2173         list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2174
2175         /* Busy means that queued jobs are already being processed */
2176         if (!d40c->busy)
2177                 (void) d40_queue_start(d40c);
2178
2179         spin_unlock_irqrestore(&d40c->lock, flags);
2180 }
2181
2182 static int
2183 dma40_config_to_halfchannel(struct d40_chan *d40c,
2184                             struct stedma40_half_channel_info *info,
2185                             enum dma_slave_buswidth width,
2186                             u32 maxburst)
2187 {
2188         enum stedma40_periph_data_width addr_width;
2189         int psize;
2190
2191         switch (width) {
2192         case DMA_SLAVE_BUSWIDTH_1_BYTE:
2193                 addr_width = STEDMA40_BYTE_WIDTH;
2194                 break;
2195         case DMA_SLAVE_BUSWIDTH_2_BYTES:
2196                 addr_width = STEDMA40_HALFWORD_WIDTH;
2197                 break;
2198         case DMA_SLAVE_BUSWIDTH_4_BYTES:
2199                 addr_width = STEDMA40_WORD_WIDTH;
2200                 break;
2201         case DMA_SLAVE_BUSWIDTH_8_BYTES:
2202                 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2203                 break;
2204         default:
2205                 dev_err(d40c->base->dev,
2206                         "illegal peripheral address width "
2207                         "requested (%d)\n",
2208                         width);
2209                 return -EINVAL;
2210         }
2211
2212         if (chan_is_logical(d40c)) {
2213                 if (maxburst >= 16)
2214                         psize = STEDMA40_PSIZE_LOG_16;
2215                 else if (maxburst >= 8)
2216                         psize = STEDMA40_PSIZE_LOG_8;
2217                 else if (maxburst >= 4)
2218                         psize = STEDMA40_PSIZE_LOG_4;
2219                 else
2220                         psize = STEDMA40_PSIZE_LOG_1;
2221         } else {
2222                 if (maxburst >= 16)
2223                         psize = STEDMA40_PSIZE_PHY_16;
2224                 else if (maxburst >= 8)
2225                         psize = STEDMA40_PSIZE_PHY_8;
2226                 else if (maxburst >= 4)
2227                         psize = STEDMA40_PSIZE_PHY_4;
2228                 else
2229                         psize = STEDMA40_PSIZE_PHY_1;
2230         }
2231
2232         info->data_width = addr_width;
2233         info->psize = psize;
2234         info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2235
2236         return 0;
2237 }
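
/*
 * Example (illustrative): width == DMA_SLAVE_BUSWIDTH_4_BYTES and
 * maxburst == 8 on a logical channel yield STEDMA40_WORD_WIDTH and
 * STEDMA40_PSIZE_LOG_8 above, i.e. 32-bit accesses in bursts of eight
 * elements.
 */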
2238
2239 /* Runtime reconfiguration extension */
2240 static int d40_set_runtime_config(struct dma_chan *chan,
2241                                   struct dma_slave_config *config)
2242 {
2243         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2244         struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2245         enum dma_slave_buswidth src_addr_width, dst_addr_width;
2246         dma_addr_t config_addr;
2247         u32 src_maxburst, dst_maxburst;
2248         int ret;
2249
2250         src_addr_width = config->src_addr_width;
2251         src_maxburst = config->src_maxburst;
2252         dst_addr_width = config->dst_addr_width;
2253         dst_maxburst = config->dst_maxburst;
2254
2255         if (config->direction == DMA_FROM_DEVICE) {
2256                 dma_addr_t dev_addr_rx =
2257                         d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2258
2259                 config_addr = config->src_addr;
2260                 if (dev_addr_rx)
2261                         dev_dbg(d40c->base->dev,
2262                                 "channel has a pre-wired RX address %08x "
2263                                 "overriding with %08x\n",
2264                                 dev_addr_rx, config_addr);
2265                 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2266                         dev_dbg(d40c->base->dev,
2267                                 "channel was not configured for peripheral "
2268                                 "to memory transfer (%d) overriding\n",
2269                                 cfg->dir);
2270                 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2271
2272                 /* Configure the memory side */
2273                 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2274                         dst_addr_width = src_addr_width;
2275                 if (dst_maxburst == 0)
2276                         dst_maxburst = src_maxburst;
2277
2278         } else if (config->direction == DMA_TO_DEVICE) {
2279                 dma_addr_t dev_addr_tx =
2280                         d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2281
2282                 config_addr = config->dst_addr;
2283                 if (dev_addr_tx)
2284                         dev_dbg(d40c->base->dev,
2285                                 "channel has a pre-wired TX address %08x "
2286                                 "overriding with %08x\n",
2287                                 dev_addr_tx, config_addr);
2288                 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2289                         dev_dbg(d40c->base->dev,
2290                                 "channel was not configured for memory "
2291                                 "to peripheral transfer (%d) overriding\n",
2292                                 cfg->dir);
2293                 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2294
2295                 /* Configure the memory side */
2296                 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2297                         src_addr_width = dst_addr_width;
2298                 if (src_maxburst == 0)
2299                         src_maxburst = dst_maxburst;
2300         } else {
2301                 dev_err(d40c->base->dev,
2302                         "unrecognized channel direction %d\n",
2303                         config->direction);
2304                 return -EINVAL;
2305         }
2306
2307         if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2308                 dev_err(d40c->base->dev,
2309                         "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2310                         src_maxburst,
2311                         src_addr_width,
2312                         dst_maxburst,
2313                         dst_addr_width);
2314                 return -EINVAL;
2315         }
2316
2317         ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2318                                           src_addr_width,
2319                                           src_maxburst);
2320         if (ret)
2321                 return ret;
2322
2323         ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2324                                           dst_addr_width,
2325                                           dst_maxburst);
2326         if (ret)
2327                 return ret;
2328
2329         /* Fill in register values */
2330         if (chan_is_logical(d40c))
2331                 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2332         else
2333                 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2334                             &d40c->dst_def_cfg, false);
2335
2336         /* These settings will take precedence later */
2337         d40c->runtime_addr = config_addr;
2338         d40c->runtime_direction = config->direction;
2339         dev_dbg(d40c->base->dev,
2340                 "configured channel %s for %s, data width %d/%d, "
2341                 "maxburst %d/%d elements, LE, no flow control\n",
2342                 dma_chan_name(chan),
2343                 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2344                 src_addr_width, dst_addr_width,
2345                 src_maxburst, dst_maxburst);
2346
2347         return 0;
2348 }
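
/*
 * Example (a sketch; uart_rx_fifo_addr stands in for a real device FIFO
 * address): a client reconfiguring an RX channel at runtime fills in a
 * struct dma_slave_config and passes it down via the dmaengine core:
 *
 *      struct dma_slave_config rx_conf = {
 *              .direction = DMA_FROM_DEVICE,
 *              .src_addr = uart_rx_fifo_addr,
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *              .src_maxburst = 8,
 *      };
 *
 *      dmaengine_slave_config(chan, &rx_conf);
 *
 * The memory-side width and burst are left unset here and are copied
 * from the peripheral side by d40_set_runtime_config() above.
 */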
2349
2350 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2351                        unsigned long arg)
2352 {
2353         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2354
2355         if (d40c->phy_chan == NULL) {
2356                 chan_err(d40c, "Channel is not allocated!\n");
2357                 return -EINVAL;
2358         }
2359
2360         switch (cmd) {
2361         case DMA_TERMINATE_ALL:
2362                 return d40_terminate_all(d40c);
2363         case DMA_PAUSE:
2364                 return d40_pause(d40c);
2365         case DMA_RESUME:
2366                 return d40_resume(d40c);
2367         case DMA_SLAVE_CONFIG:
2368                 return d40_set_runtime_config(chan,
2369                         (struct dma_slave_config *) arg);
2370         default:
2371                 break;
2372         }
2373
2374         /* Other commands are unimplemented */
2375         return -ENXIO;
2376 }
2377
2378 /* Initialization functions */
2379
2380 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2381                                  struct d40_chan *chans, int offset,
2382                                  int num_chans)
2383 {
2384         int i = 0;
2385         struct d40_chan *d40c;
2386
2387         INIT_LIST_HEAD(&dma->channels);
2388
2389         for (i = offset; i < offset + num_chans; i++) {
2390                 d40c = &chans[i];
2391                 d40c->base = base;
2392                 d40c->chan.device = dma;
2393
2394                 spin_lock_init(&d40c->lock);
2395
2396                 d40c->log_num = D40_PHY_CHAN;
2397
2398                 INIT_LIST_HEAD(&d40c->active);
2399                 INIT_LIST_HEAD(&d40c->queue);
2400                 INIT_LIST_HEAD(&d40c->pending_queue);
2401                 INIT_LIST_HEAD(&d40c->client);
2402
2403                 tasklet_init(&d40c->tasklet, dma_tasklet,
2404                              (unsigned long) d40c);
2405
2406                 list_add_tail(&d40c->chan.device_node,
2407                               &dma->channels);
2408         }
2409 }
2410
2411 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2412 {
2413         if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2414                 dev->device_prep_slave_sg = d40_prep_slave_sg;
2415
2416         if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2417                 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2418
2419                 /*
2420                  * This controller can only access addresses at even
2421                  * 32-bit boundaries, i.e. with a 2^2 byte alignment.
2422                  */
2423                 dev->copy_align = 2;
2424         }
2425
2426         if (dma_has_cap(DMA_SG, dev->cap_mask))
2427                 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2428
2429         if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2430                 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2431
2432         dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2433         dev->device_free_chan_resources = d40_free_chan_resources;
2434         dev->device_issue_pending = d40_issue_pending;
2435         dev->device_tx_status = d40_tx_status;
2436         dev->device_control = d40_control;
2437         dev->dev = base->dev;
2438 }
2439
2440 static int __init d40_dmaengine_init(struct d40_base *base,
2441                                      int num_reserved_chans)
2442 {
2443         int err;
2444
2445         d40_chan_init(base, &base->dma_slave, base->log_chans,
2446                       0, base->num_log_chans);
2447
2448         dma_cap_zero(base->dma_slave.cap_mask);
2449         dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2450         dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2451
2452         d40_ops_init(base, &base->dma_slave);
2453
2454         err = dma_async_device_register(&base->dma_slave);
2455
2456         if (err) {
2457                 d40_err(base->dev, "Failed to register slave channels\n");
2458                 goto failure1;
2459         }
2460
2461         d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2462                       base->num_log_chans, base->plat_data->memcpy_len);
2463
2464         dma_cap_zero(base->dma_memcpy.cap_mask);
2465         dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2466         dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2467
2468         d40_ops_init(base, &base->dma_memcpy);
2469
2470         err = dma_async_device_register(&base->dma_memcpy);
2471
2472         if (err) {
2473                 d40_err(base->dev,
2474                         "Failed to register memcpy only channels\n");
2475                 goto failure2;
2476         }
2477
2478         d40_chan_init(base, &base->dma_both, base->phy_chans,
2479                       0, num_reserved_chans);
2480
2481         dma_cap_zero(base->dma_both.cap_mask);
2482         dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2483         dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2484         dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2485         dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2486
2487         d40_ops_init(base, &base->dma_both);
2488         err = dma_async_device_register(&base->dma_both);
2489
2490         if (err) {
2491                 d40_err(base->dev,
2492                         "Failed to register logical and physical capable channels\n");
2493                 goto failure3;
2494         }
2495         return 0;
2496 failure3:
2497         dma_async_device_unregister(&base->dma_memcpy);
2498 failure2:
2499         dma_async_device_unregister(&base->dma_slave);
2500 failure1:
2501         return err;
2502 }
2503
2506 static int __init d40_phy_res_init(struct d40_base *base)
2507 {
2508         int i;
2509         int num_phy_chans_avail = 0;
2510         u32 val[2];
2511         int odd_even_bit = -2;
2512
2513         val[0] = readl(base->virtbase + D40_DREG_PRSME);
2514         val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2515
2516         for (i = 0; i < base->num_phy_chans; i++) {
2517                 base->phy_res[i].num = i;
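                /*
                 * Each channel has a two-bit status field: even-numbered
                 * channels live in PRSME, odd-numbered ones in PRSMO. A
                 * value of 1 means the channel is reserved for secure
                 * use only.
                 */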
2518                 odd_even_bit += 2 * ((i % 2) == 0);
2519                 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2520                         /* Mark security only channels as occupied */
2521                         base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2522                         base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2523                 } else {
2524                         base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2525                         base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2526                         num_phy_chans_avail++;
2527                 }
2528                 spin_lock_init(&base->phy_res[i].lock);
2529         }
2530
2531         /* Mark disabled channels as occupied */
2532         for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2533                 int chan = base->plat_data->disabled_channels[i];
2534
2535                 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2536                 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2537                 num_phy_chans_avail--;
2538         }
2539
2540         dev_info(base->dev, "%d of %d physical DMA channels available\n",
2541                  num_phy_chans_avail, base->num_phy_chans);
2542
2543         /* Verify settings extended vs standard */
2544         val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2545
2546         for (i = 0; i < base->num_phy_chans; i++) {
2547
2548                 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2549                     (val[0] & 0x3) != 1)
2550                         dev_info(base->dev,
2551                                  "[%s] INFO: channel %d is misconfigured (%d)\n",
2552                                  __func__, i, val[0] & 0x3);
2553
2554                 val[0] = val[0] >> 2;
2555         }
2556
2557         return num_phy_chans_avail;
2558 }
2559
2560 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2561 {
2562         struct stedma40_platform_data *plat_data;
2563         struct clk *clk = NULL;
2564         void __iomem *virtbase = NULL;
2565         struct resource *res = NULL;
2566         struct d40_base *base = NULL;
2567         int num_log_chans = 0;
2568         int num_phy_chans;
2569         int i;
2570         u32 pid;
2571         u32 cid;
2572         u8 rev;
2573
2574         clk = clk_get(&pdev->dev, NULL);
2575
2576         if (IS_ERR(clk)) {
2577                 d40_err(&pdev->dev, "No matching clock found\n");
2578                 goto failure;
2579         }
2580
2581         clk_enable(clk);
2582
2583         /* Get IO for DMAC base address */
2584         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2585         if (!res)
2586                 goto failure;
2587
2588         if (request_mem_region(res->start, resource_size(res),
2589                                D40_NAME " I/O base") == NULL)
2590                 goto failure;
2591
2592         virtbase = ioremap(res->start, resource_size(res));
2593         if (!virtbase)
2594                 goto failure;
2595
2596         /* This is just a regular AMBA PrimeCell ID actually */
2597         for (pid = 0, i = 0; i < 4; i++)
2598                 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2599                         & 255) << (i * 8);
2600         for (cid = 0, i = 0; i < 4; i++)
2601                 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2602                         & 255) << (i * 8);
2603
2604         if (cid != AMBA_CID) {
2605                 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2606                 goto failure;
2607         }
2608         if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2609                 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2610                         AMBA_MANF_BITS(pid),
2611                         AMBA_VENDOR_ST);
2612                 goto failure;
2613         }
2614         /*
2615          * HW revision:
2616          * DB8500ed has revision 0
2617          * ? has revision 1
2618          * DB8500v1 has revision 2
2619          * DB8500v2 has revision 3
2620          */
2621         rev = AMBA_REV_BITS(pid);
2622
2623         /* The number of physical channels on this HW */
2624         num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2625
2626         dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2627                  rev, res->start);
2628
2629         plat_data = pdev->dev.platform_data;
2630
2631         /* Count the number of logical channels in use */
2632         for (i = 0; i < plat_data->dev_len; i++)
2633                 if (plat_data->dev_rx[i] != 0)
2634                         num_log_chans++;
2635
2636         for (i = 0; i < plat_data->dev_len; i++)
2637                 if (plat_data->dev_tx[i] != 0)
2638                         num_log_chans++;
2639
2640         base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2641                        (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2642                        sizeof(struct d40_chan), GFP_KERNEL);
2643
2644         if (base == NULL) {
2645                 d40_err(&pdev->dev, "Out of memory\n");
2646                 goto failure;
2647         }
2648
2649         base->rev = rev;
2650         base->clk = clk;
2651         base->num_phy_chans = num_phy_chans;
2652         base->num_log_chans = num_log_chans;
2653         base->phy_start = res->start;
2654         base->phy_size = resource_size(res);
2655         base->virtbase = virtbase;
2656         base->plat_data = plat_data;
2657         base->dev = &pdev->dev;
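
        /*
         * The channel descriptor arrays live in the tail of the single
         * kzalloc() above, right after the aligned struct d40_base.
         */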
2658         base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2659         base->log_chans = &base->phy_chans[num_phy_chans];
2660
2661         base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2662                                 GFP_KERNEL);
2663         if (!base->phy_res)
2664                 goto failure;
2665
2666         base->lookup_phy_chans = kzalloc(num_phy_chans *
2667                                          sizeof(struct d40_chan *),
2668                                          GFP_KERNEL);
2669         if (!base->lookup_phy_chans)
2670                 goto failure;
2671
2672         if (num_log_chans + plat_data->memcpy_len) {
2673                 /*
2674                  * The max number of logical channels equals the number of
2675                  * event lines for all src and dst devices.
2676                  */
2677                 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2678                                                  sizeof(struct d40_chan *),
2679                                                  GFP_KERNEL);
2680                 if (!base->lookup_log_chans)
2681                         goto failure;
2682         }
2683
2684         base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2685                                             sizeof(struct d40_desc *) *
2686                                             D40_LCLA_LINK_PER_EVENT_GRP,
2687                                             GFP_KERNEL);
2688         if (!base->lcla_pool.alloc_map)
2689                 goto failure;
2690
2691         base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2692                                             0, SLAB_HWCACHE_ALIGN,
2693                                             NULL);
2694         if (base->desc_slab == NULL)
2695                 goto failure;
2696
2697         return base;
2698
2699 failure:
2700         if (!IS_ERR(clk)) {
2701                 clk_disable(clk);
2702                 clk_put(clk);
2703         }
2704         if (virtbase)
2705                 iounmap(virtbase);
2706         if (res)
2707                 release_mem_region(res->start,
2708                                    resource_size(res));
2711
2712         if (base) {
2713                 kfree(base->lcla_pool.alloc_map);
2714                 kfree(base->lookup_log_chans);
2715                 kfree(base->lookup_phy_chans);
2716                 kfree(base->phy_res);
2717                 kfree(base);
2718         }
2719
2720         return NULL;
2721 }
2722
2723 static void __init d40_hw_init(struct d40_base *base)
2724 {
2725
2726         static const struct d40_reg_val dma_init_reg[] = {
2727                 /* Clock every part of the DMA block from start */
2728                 { .reg = D40_DREG_GCC,    .val = 0x0000ff01},
2729
2730                 /* Interrupts on all logical channels */
2731                 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2732                 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2733                 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2734                 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2735                 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2736                 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2737                 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2738                 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2739                 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2740                 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2741                 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2742                 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2743         };
2744         int i;
2745         u32 prmseo[2] = {0, 0};
2746         u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2747         u32 pcmis = 0;
2748         u32 pcicr = 0;
2749
2750         for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2751                 writel(dma_init_reg[i].val,
2752                        base->virtbase + dma_init_reg[i].reg);
2753
2754         /* Configure all our dma channels to default settings */
2755         for (i = 0; i < base->num_phy_chans; i++) {
2756
2757                 activeo[i % 2] = activeo[i % 2] << 2;
2758
2759                 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2760                     == D40_ALLOC_PHY) {
2761                         activeo[i % 2] |= 3;
2762                         continue;
2763                 }
2764
2765                 /* Enable interrupt # */
2766                 pcmis = (pcmis << 1) | 1;
2767
2768                 /* Clear interrupt # */
2769                 pcicr = (pcicr << 1) | 1;
2770
2771                 /* Set channel to physical mode */
2772                 prmseo[i % 2] = prmseo[i % 2] << 2;
2773                 prmseo[i % 2] |= 1;
2774
2775         }
2776
2777         writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2778         writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2779         writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2780         writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2781
2782         /* Write which interrupt to enable */
2783         writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2784
2785         /* Write which interrupt to clear */
2786         writel(pcicr, base->virtbase + D40_DREG_PCICR);
2787
2788 }
2789
2790 static int __init d40_lcla_allocate(struct d40_base *base)
2791 {
2792         struct d40_lcla_pool *pool = &base->lcla_pool;
2793         unsigned long *page_list;
2794         int i, j;
2795         int ret = 0;
2796
2797         /*
2798          * This is somewhat ugly. We need 8192 bytes with 18-bit (256 KiB)
2799          * alignment. To fulfil this hardware requirement without wasting
2800          * 256 KiB, we allocate pages until we get an aligned one.
2801          */
2802         page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2803                             GFP_KERNEL);
2804
2805         if (!page_list) {
2806                 ret = -ENOMEM;
2807                 goto failure;
2808         }
2809
2810         /* Calculate how many pages are required */
2811         base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2812
2813         for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2814                 page_list[i] = __get_free_pages(GFP_KERNEL,
2815                                                 base->lcla_pool.pages);
2816                 if (!page_list[i]) {
2817
2818                         d40_err(base->dev, "Failed to allocate %d pages.\n",
2819                                 base->lcla_pool.pages);
2820
2821                         for (j = 0; j < i; j++)
2822                                 free_pages(page_list[j], base->lcla_pool.pages);
2823                         goto failure;
2824                 }
2825
2826                 if ((virt_to_phys((void *)page_list[i]) &
2827                      (LCLA_ALIGNMENT - 1)) == 0)
2828                         break;
2829         }
2830
2831         for (j = 0; j < i; j++)
2832                 free_pages(page_list[j], base->lcla_pool.pages);
2833
2834         if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2835                 base->lcla_pool.base = (void *)page_list[i];
2836         } else {
2837                 /*
2838                  * After many attempts with no success finding the correct
2839                  * alignment, fall back to allocating a bigger buffer.
2840                  */
2841                 dev_warn(base->dev,
2842                          "[%s] Failed to get %d pages @ 18 bit align.\n",
2843                          __func__, base->lcla_pool.pages);
2844                 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2845                                                          base->num_phy_chans +
2846                                                          LCLA_ALIGNMENT,
2847                                                          GFP_KERNEL);
2848                 if (!base->lcla_pool.base_unaligned) {
2849                         ret = -ENOMEM;
2850                         goto failure;
2851                 }
2852
2853                 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2854                                                  LCLA_ALIGNMENT);
2855         }
2856
2857         pool->dma_addr = dma_map_single(base->dev, pool->base,
2858                                         SZ_1K * base->num_phy_chans,
2859                                         DMA_TO_DEVICE);
2860         if (dma_mapping_error(base->dev, pool->dma_addr)) {
2861                 pool->dma_addr = 0;
2862                 ret = -ENOMEM;
2863                 goto failure;
2864         }
2865
2866         writel(virt_to_phys(base->lcla_pool.base),
2867                base->virtbase + D40_DREG_LCLA);
2868 failure:
2869         kfree(page_list);
2870         return ret;
2871 }
2872
2873 static int __init d40_probe(struct platform_device *pdev)
2874 {
2875         int err;
2876         int ret = -ENOENT;
2877         struct d40_base *base;
2878         struct resource *res = NULL;
2879         int num_reserved_chans;
2880         u32 val;
2881
2882         base = d40_hw_detect_init(pdev);
2883
2884         if (!base)
2885                 goto failure;
2886
2887         num_reserved_chans = d40_phy_res_init(base);
2888
2889         platform_set_drvdata(pdev, base);
2890
2891         spin_lock_init(&base->interrupt_lock);
2892         spin_lock_init(&base->execmd_lock);
2893
2894         /* Get IO for logical channel parameter address */
2895         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2896         if (!res) {
2897                 ret = -ENOENT;
2898                 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2899                 goto failure;
2900         }
2901         base->lcpa_size = resource_size(res);
2902         base->phy_lcpa = res->start;
2903
2904         if (request_mem_region(res->start, resource_size(res),
2905                                D40_NAME " I/O lcpa") == NULL) {
2906                 ret = -EBUSY;
2907                 d40_err(&pdev->dev,
2908                         "Failed to request LCPA region 0x%x-0x%x\n",
2909                         res->start, res->end);
2910                 goto failure;
2911         }
2912
2913         /* We make use of ESRAM memory for this. */
2914         val = readl(base->virtbase + D40_DREG_LCPA);
2915         if (res->start != val && val != 0) {
2916                 dev_warn(&pdev->dev,
2917                          "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2918                          __func__, val, res->start);
2919         } else
2920                 writel(res->start, base->virtbase + D40_DREG_LCPA);
2921
2922         base->lcpa_base = ioremap(res->start, resource_size(res));
2923         if (!base->lcpa_base) {
2924                 ret = -ENOMEM;
2925                 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2926                 goto failure;
2927         }
2928
2929         ret = d40_lcla_allocate(base);
2930         if (ret) {
2931                 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2932                 goto failure;
2933         }
2934
2935         spin_lock_init(&base->lcla_pool.lock);
2936
2937         base->irq = platform_get_irq(pdev, 0);
2938
2939         ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2940         if (ret) {
2941                 d40_err(&pdev->dev, "Failed to request IRQ\n");
2942                 goto failure;
2943         }
2944
2945         err = d40_dmaengine_init(base, num_reserved_chans);
2946         if (err) {
                /* Propagate the error; ret may still be 0 at this point */
                ret = err;
2947                 goto failure;
        }
2948
2949         d40_hw_init(base);
2950
2951         dev_info(base->dev, "initialized\n");
2952         return 0;
2953
2954 failure:
2955         if (base) {
2956                 if (base->desc_slab)
2957                         kmem_cache_destroy(base->desc_slab);
2958                 if (base->virtbase)
2959                         iounmap(base->virtbase);
2960
2961                 if (base->lcla_pool.dma_addr)
2962                         dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2963                                          SZ_1K * base->num_phy_chans,
2964                                          DMA_TO_DEVICE);
2965
2966                 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2967                         free_pages((unsigned long)base->lcla_pool.base,
2968                                    base->lcla_pool.pages);
2969
2970                 kfree(base->lcla_pool.base_unaligned);
2971
2972                 if (base->phy_lcpa)
2973                         release_mem_region(base->phy_lcpa,
2974                                            base->lcpa_size);
2975                 if (base->phy_start)
2976                         release_mem_region(base->phy_start,
2977                                            base->phy_size);
2978                 if (base->clk) {
2979                         clk_disable(base->clk);
2980                         clk_put(base->clk);
2981                 }
2982
2983                 kfree(base->lcla_pool.alloc_map);
2984                 kfree(base->lookup_log_chans);
2985                 kfree(base->lookup_phy_chans);
2986                 kfree(base->phy_res);
2987                 kfree(base);
2988         }
2989
2990         d40_err(&pdev->dev, "probe failed\n");
2991         return ret;
2992 }
2993
2994 static struct platform_driver d40_driver = {
2995         .driver = {
2996                 .owner = THIS_MODULE,
2997                 .name  = D40_NAME,
2998         },
2999 };
3000
3001 static int __init stedma40_init(void)
3002 {
3003         return platform_driver_probe(&d40_driver, d40_probe);
3004 }
3005 subsys_initcall(stedma40_init);