1 /*
2  * arch/arm/mach-tegra/dma.c
3  *
4  * System DMA driver for NVIDIA Tegra SoCs
5  *
6  * Copyright (c) 2008-2012, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/io.h>
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/err.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <linux/clk.h>
31 #include <linux/syscore_ops.h>
32 #include <mach/dma.h>
33 #include <mach/irqs.h>
34 #include <mach/iomap.h>
35 #include <mach/clk.h>
36
37 #define APB_DMA_GEN                             0x000
38 #define GEN_ENABLE                              (1<<31)
39
40 #define APB_DMA_CNTRL                           0x010
41
42 #define APB_DMA_IRQ_MASK                        0x01c
43
44 #define APB_DMA_IRQ_MASK_SET                    0x020
45
46 #define APB_DMA_CHAN_CSR                        0x000
47 #define CSR_ENB                                 (1<<31)
48 #define CSR_IE_EOC                              (1<<30)
49 #define CSR_HOLD                                (1<<29)
50 #define CSR_DIR                                 (1<<28)
51 #define CSR_ONCE                                (1<<27)
52 #define CSR_FLOW                                (1<<21)
53 #define CSR_REQ_SEL_SHIFT                       16
54 #define CSR_WCOUNT_SHIFT                        2
55 #define CSR_WCOUNT_MASK                         0xFFFC
56
57 #define APB_DMA_CHAN_STA                        0x004
58 #define STA_BUSY                                (1<<31)
59 #define STA_ISE_EOC                             (1<<30)
60 #define STA_HALT                                (1<<29)
61 #define STA_PING_PONG                           (1<<28)
62 #define STA_COUNT_SHIFT                         2
63 #define STA_COUNT_MASK                          0xFFFC
64
65 #define APB_DMA_CHAN_AHB_PTR                    0x010
66
67 #define APB_DMA_CHAN_AHB_SEQ                    0x014
68 #define AHB_SEQ_INTR_ENB                        (1<<31)
69 #define AHB_SEQ_BUS_WIDTH_SHIFT                 28
70 #define AHB_SEQ_BUS_WIDTH_MASK                  (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
71 #define AHB_SEQ_BUS_WIDTH_8                     (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
72 #define AHB_SEQ_BUS_WIDTH_16                    (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
73 #define AHB_SEQ_BUS_WIDTH_32                    (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
74 #define AHB_SEQ_BUS_WIDTH_64                    (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
75 #define AHB_SEQ_BUS_WIDTH_128                   (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
76 #define AHB_SEQ_DATA_SWAP                       (1<<27)
77 #define AHB_SEQ_BURST_MASK                      (0x7<<24)
78 #define AHB_SEQ_BURST_1                         (4<<24)
79 #define AHB_SEQ_BURST_4                         (5<<24)
80 #define AHB_SEQ_BURST_8                         (6<<24)
81 #define AHB_SEQ_DBL_BUF                         (1<<19)
82 #define AHB_SEQ_WRAP_SHIFT                      16
83 #define AHB_SEQ_WRAP_MASK                       (0x7<<AHB_SEQ_WRAP_SHIFT)
84
85 #define APB_DMA_CHAN_APB_PTR                    0x018
86
87 #define APB_DMA_CHAN_APB_SEQ                    0x01c
88 #define APB_SEQ_BUS_WIDTH_SHIFT                 28
89 #define APB_SEQ_BUS_WIDTH_MASK                  (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
90 #define APB_SEQ_BUS_WIDTH_8                     (0<<APB_SEQ_BUS_WIDTH_SHIFT)
91 #define APB_SEQ_BUS_WIDTH_16                    (1<<APB_SEQ_BUS_WIDTH_SHIFT)
92 #define APB_SEQ_BUS_WIDTH_32                    (2<<APB_SEQ_BUS_WIDTH_SHIFT)
93 #define APB_SEQ_BUS_WIDTH_64                    (3<<APB_SEQ_BUS_WIDTH_SHIFT)
94 #define APB_SEQ_BUS_WIDTH_128                   (4<<APB_SEQ_BUS_WIDTH_SHIFT)
95 #define APB_SEQ_DATA_SWAP                       (1<<27)
96 #define APB_SEQ_WRAP_SHIFT                      16
97 #define APB_SEQ_WRAP_MASK                       (0x7<<APB_SEQ_WRAP_SHIFT)
98
99 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
100 #define TEGRA_SYSTEM_DMA_CH_NR                  16
101 #else
102 #define TEGRA_SYSTEM_DMA_CH_NR                  32
103 #endif
104 #define TEGRA_SYSTEM_DMA_AVP_CH_NUM             4
105 #define TEGRA_SYSTEM_DMA_CH_MIN                 0
106 #define TEGRA_SYSTEM_DMA_CH_MAX \
107         (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
108
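/*
 * The top TEGRA_SYSTEM_DMA_AVP_CH_NUM channels are not managed here; going
 * by the name, they are reserved for the AVP.
 */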
109 /* Maximum dma transfer size */
110 #define TEGRA_DMA_MAX_TRANSFER_SIZE             0x10000
111
112 static struct clk *dma_clk;
113
114 static const unsigned int ahb_addr_wrap_table[8] = {
115         0, 32, 64, 128, 256, 512, 1024, 2048
116 };
117
118 static const unsigned int apb_addr_wrap_table[8] = {
119         0, 1, 2, 4, 8, 16, 32, 64
120 };
121
122 static const unsigned int bus_width_table[5] = {
123         8, 16, 32, 64, 128
124 };
125
126 static void __iomem *general_dma_addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
127 typedef void (*dma_isr_handler)(struct tegra_dma_channel *ch);
128
129 #define TEGRA_DMA_NAME_SIZE 16
130 struct tegra_dma_channel {
131         struct list_head        list;
132         int                     id;
133         spinlock_t              lock;
134         char                    name[TEGRA_DMA_NAME_SIZE];
135         char                    client_name[TEGRA_DMA_NAME_SIZE];
136         void  __iomem           *addr;
137         int                     mode;
138         int                     irq;
139         dma_callback            callback;
140         struct tegra_dma_req    *cb_req;
141         dma_isr_handler         isr_handler;
142 };
143
144 #define  NV_DMA_MAX_CHANNELS  32
145
146 static bool tegra_dma_initialized;
147 static DEFINE_MUTEX(tegra_dma_lock);
148 static DEFINE_SPINLOCK(enable_lock);
149
150 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
151 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
152
153 static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
154         struct tegra_dma_req *req);
155 static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
156         struct tegra_dma_req *req);
157 static void handle_oneshot_dma(struct tegra_dma_channel *ch);
158 static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch);
159 static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch);
160 static void handle_dma_isr_locked(struct tegra_dma_channel *ch);
161
162 void tegra_dma_flush(struct tegra_dma_channel *ch)
163 {
164 }
165 EXPORT_SYMBOL(tegra_dma_flush);
166
167 static void tegra_dma_stop(struct tegra_dma_channel *ch)
168 {
169         u32 csr;
170         u32 status;
171
172         csr = readl(ch->addr + APB_DMA_CHAN_CSR);
173         csr &= ~CSR_IE_EOC;
174         writel(csr, ch->addr + APB_DMA_CHAN_CSR);
175
176         csr &= ~CSR_ENB;
177         writel(csr, ch->addr + APB_DMA_CHAN_CSR);
178
179         status = readl(ch->addr + APB_DMA_CHAN_STA);
180         if (status & STA_ISE_EOC)
181                 writel(status, ch->addr + APB_DMA_CHAN_STA);
182 }
183
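/*
 * pause_dma() globally disables the APB DMA engine and returns with
 * enable_lock held; the matching resume_dma() re-enables the engine and
 * releases the lock. The two must always be paired, and callers must not
 * sleep in between.
 */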
184 static void pause_dma(bool wait_for_burst_complete)
185 {
186         spin_lock(&enable_lock);
187         writel(0, general_dma_addr + APB_DMA_GEN);
188         if (wait_for_burst_complete)
189                 udelay(20);
190 }
191
192 static void resume_dma(void)
193 {
194         writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
195         spin_unlock(&enable_lock);
196 }
197
198 static void start_head_req(struct tegra_dma_channel *ch)
199 {
200         struct tegra_dma_req *head_req;
201         struct tegra_dma_req *next_req;
202         if (!list_empty(&ch->list)) {
203                 head_req = list_entry(ch->list.next, typeof(*head_req), node);
204                 tegra_dma_update_hw(ch, head_req);
205
206                 /* Mark the next request as pending; it is not yet configured in hardware. */
207                 if (!list_is_last(&head_req->node, &ch->list)) {
208                         next_req = list_entry(head_req->node.next,
209                                         typeof(*head_req), node);
210                         next_req->status = TEGRA_DMA_REQ_PENDING;
211                 }
212         }
213 }
214
215 static void configure_next_req(struct tegra_dma_channel *ch,
216         struct tegra_dma_req *hreq)
217 {
218         struct tegra_dma_req *next_req;
219         if (!list_is_last(&hreq->node, &ch->list)) {
220                 next_req = list_entry(hreq->node.next, typeof(*next_req), node);
221                 tegra_dma_update_hw_partial(ch, next_req);
222         }
223 }
224
225 static inline unsigned int get_req_xfer_word_count(
226         struct tegra_dma_channel *ch, struct tegra_dma_req *req)
227 {
228         if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
229                 return req->size >> 3;
230         else
231                 return req->size >> 2;
232 }
233
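/*
 * A note on the arithmetic below (and in dma_active_count()): the STA_COUNT
 * field holds (words remaining - 1), already shifted up by STA_COUNT_SHIFT,
 * so (status & STA_COUNT_MASK) + 4 is the number of bytes still outstanding.
 * Illustrative numbers: a 64-byte request with 4 words still pending reads a
 * masked count of 12, so 64 - (12 + 4) = 48 bytes have been transferred.
 */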
234 static int get_current_xferred_count(struct tegra_dma_channel *ch,
235         struct tegra_dma_req *req, unsigned long status)
236 {
237         int req_transfer_count;
238         req_transfer_count = get_req_xfer_word_count(ch, req) << 2;
239         return req_transfer_count - ((status & STA_COUNT_MASK) + 4);
240 }
241
242 static void tegra_dma_abort_req(struct tegra_dma_channel *ch,
243                 struct tegra_dma_req *req, const char *warn_msg)
244 {
245         unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
246
247          /*
248           * Check if an interrupt is pending.
249           * This function is called from the ISR, so there is no need to
250           * run the ISR handler again; just update bytes_transferred.
251           */
252         if (status & STA_ISE_EOC)
253                 req->bytes_transferred += get_req_xfer_word_count(ch, req) << 2;
254         tegra_dma_stop(ch);
255
256         req->bytes_transferred +=  get_current_xferred_count(ch, req, status);
257         req->status = -TEGRA_DMA_REQ_ERROR_STOPPED;
258         if (warn_msg)
259                 WARN(1, KERN_WARNING "%s\n", warn_msg);
260         start_head_req(ch);
261 }
262
263 static void handle_continuous_head_request(struct tegra_dma_channel *ch,
264                 struct tegra_dma_req *last_req)
265 {
266         struct tegra_dma_req *hreq = NULL;
267
268         if (list_empty(&ch->list)) {
269                 tegra_dma_abort_req(ch, last_req, NULL);
270                 return;
271         }
272
273         /*
274          * The request at the head of the list should already be in
275          * flight. If it is not, the request was queued too late and
276          * we need to abort the DMA and start the next request
277          * immediately.
278          */
279         hreq = list_entry(ch->list.next, typeof(*hreq), node);
280         if (hreq->status != TEGRA_DMA_REQ_INFLIGHT) {
281                 tegra_dma_abort_req(ch, last_req, "Req was not queued on time");
282                 return;
283         }
284
285         /* Configure next request in single buffer mode */
286         if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
287                 configure_next_req(ch, hreq);
288 }
289
290 static unsigned int get_channel_status(struct tegra_dma_channel *ch,
291                         struct tegra_dma_req *req, bool is_stop_dma)
292 {
293         unsigned int status;
294
295         if (is_stop_dma) {
296                 /* STOP the DMA and get the transfer count.
297                  * Getting the transfer count is tricky.
298                  *  - Globally disable DMA on all channels
299                  *  - Read the channel's status register to know the number
300                  *    of pending bytes still to be transferred.
301                  *  - Stop the DMA channel
302                  *  - Globally re-enable DMA to resume other transfers
303                  */
304                 pause_dma(true);
305                 status = readl(ch->addr + APB_DMA_CHAN_STA);
306                 tegra_dma_stop(ch);
307                 resume_dma();
308                 if (status & STA_ISE_EOC) {
309                         pr_err("DMA interrupt was pending, clearing it\n");
310                         writel(status, ch->addr + APB_DMA_CHAN_STA);
311                 }
312                 req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
313         } else {
314                 status = readl(ch->addr + APB_DMA_CHAN_STA);
315         }
316         return status;
317 }
318
319 /* should be called with the channel lock held */
320 static unsigned int dma_active_count(struct tegra_dma_channel *ch,
321         struct tegra_dma_req *req, unsigned int status)
322 {
323         unsigned int to_transfer;
324         unsigned int req_transfer_count;
325
326         unsigned int bytes_transferred;
327
328         to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
329         req_transfer_count = get_req_xfer_word_count(ch, req);
330         bytes_transferred = req_transfer_count;
331
332         if (status & STA_BUSY)
333                 bytes_transferred -= to_transfer;
334
335         /*
336          * In continuous double-buffer mode the hardware count only covers
337          * half of the buffer. If the first half has already completed,
338          * add that half to the completed byte count.
339          */
340         if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
341                 if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
342                         bytes_transferred += req_transfer_count;
343
344         if (status & STA_ISE_EOC)
345                 bytes_transferred += req_transfer_count;
346
347         bytes_transferred *= 4;
348
349         return bytes_transferred;
350 }
351
352 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
353         struct tegra_dma_req *_req)
354 {
355         struct tegra_dma_req *req = NULL;
356         int found = 0;
357         unsigned int status;
358         unsigned long irq_flags;
359         int stop = 0;
360
361         spin_lock_irqsave(&ch->lock, irq_flags);
362
363         if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
364                 stop = 1;
365
366         list_for_each_entry(req, &ch->list, node) {
367                 if (req == _req) {
368                         list_del(&req->node);
369                         found = 1;
370                         break;
371                 }
372         }
373         if (!found) {
374                 spin_unlock_irqrestore(&ch->lock, irq_flags);
375                 return -ENOENT;
376         }
377
378         if (!stop)
379                 goto skip_status;
380
381         status = get_channel_status(ch, req, true);
382         req->bytes_transferred = dma_active_count(ch, req, status);
383
384         if (!list_empty(&ch->list)) {
385                 /* if the list is not empty, queue the next request */
386                 struct tegra_dma_req *next_req;
387                 next_req = list_entry(ch->list.next,
388                         typeof(*next_req), node);
389                 tegra_dma_update_hw(ch, next_req);
390         }
391 skip_status:
392         req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
393
394         spin_unlock_irqrestore(&ch->lock, irq_flags);
395
396         /* Callback should be called without any lock */
397         if (req->complete)
398                 req->complete(req);
399         return 0;
400 }
401 EXPORT_SYMBOL(tegra_dma_dequeue_req);
402
403 int tegra_dma_cancel(struct tegra_dma_channel *ch)
404 {
405         struct tegra_dma_req *hreq = NULL;
406         unsigned long status;
407         unsigned long irq_flags;
408         struct tegra_dma_req *cb_req = NULL;
409         dma_callback callback = NULL;
410         struct list_head new_list;
411
412         INIT_LIST_HEAD(&new_list);
413
414         if (ch->mode & TEGRA_DMA_SHARED) {
415                 pr_err("Can not abort requests from shared channel %d\n",
416                         ch->id);
417                 return -EPERM;
418         }
419
420         spin_lock_irqsave(&ch->lock, irq_flags);
421
422         /* If the list is already empty there is nothing to cancel */
423         if (list_empty(&ch->list)) {
424                 spin_unlock_irqrestore(&ch->lock, irq_flags);
425                 return 0;
426         }
427
428         /* Pause dma before checking the queue status */
429         pause_dma(true);
430         status = readl(ch->addr + APB_DMA_CHAN_STA);
431         if (status & STA_ISE_EOC) {
432                 handle_dma_isr_locked(ch);
433                 cb_req = ch->cb_req;
434                 callback = ch->callback;
435                 ch->cb_req = NULL;
436                 ch->callback = NULL;
437                 /* Re-read the status since it may have changed */
438                 status = readl(ch->addr + APB_DMA_CHAN_STA);
439         }
440
441         /* Abort the head request, stop the DMA and dequeue all requests */
442         if (!list_empty(&ch->list)) {
443                 tegra_dma_stop(ch);
444                 hreq = list_entry(ch->list.next, typeof(*hreq), node);
445                 hreq->bytes_transferred +=
446                                 get_current_xferred_count(ch, hreq, status);
447
448                 /* Move the remaining requests onto a local list. */
449                 list_replace_init(&ch->list, &new_list);
450         }
451
452         resume_dma();
453
454         spin_unlock_irqrestore(&ch->lock, irq_flags);
455
456         /* Call the callback if one became due from the interrupt handling */
457         if (callback)
458                 callback(cb_req);
459
460         /* Abort all requests on list. */
461         while (!list_empty(&new_list)) {
462                 hreq = list_entry(new_list.next, typeof(*hreq), node);
463                 hreq->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
464                 list_del(&hreq->node);
465         }
466
467         return 0;
468 }
469 EXPORT_SYMBOL(tegra_dma_cancel);
470
471 bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
472 {
473         unsigned long irq_flags;
474         bool is_empty;
475
476         spin_lock_irqsave(&ch->lock, irq_flags);
477         if (list_empty(&ch->list))
478                 is_empty = true;
479         else
480                 is_empty = false;
481         spin_unlock_irqrestore(&ch->lock, irq_flags);
482         return is_empty;
483 }
484 EXPORT_SYMBOL(tegra_dma_is_empty);
485
486 bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
487         struct tegra_dma_req *_req)
488 {
489         unsigned long irq_flags;
490         struct tegra_dma_req *req;
491
492         spin_lock_irqsave(&ch->lock, irq_flags);
493         list_for_each_entry(req, &ch->list, node) {
494                 if (req == _req) {
495                         spin_unlock_irqrestore(&ch->lock, irq_flags);
496                         return true;
497                 }
498         }
499         spin_unlock_irqrestore(&ch->lock, irq_flags);
500         return false;
501 }
502 EXPORT_SYMBOL(tegra_dma_is_req_inflight);
503
504 int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
505                         struct tegra_dma_req *req)
506 {
507         unsigned int status;
508         unsigned long irq_flags;
509         int bytes_transferred = 0;
510
511         if (IS_ERR_OR_NULL(ch))
512                 BUG();
513
514         spin_lock_irqsave(&ch->lock, irq_flags);
515
516         if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
517                 spin_unlock_irqrestore(&ch->lock, irq_flags);
518                 pr_debug("The dma request is not the head req\n");
519                 return req->bytes_transferred;
520         }
521
522         if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
523                 spin_unlock_irqrestore(&ch->lock, irq_flags);
524                 pr_debug("The dma request is not running\n");
525                 return req->bytes_transferred;
526         }
527
528         status = get_channel_status(ch, req, false);
529         bytes_transferred = dma_active_count(ch, req, status);
530         spin_unlock_irqrestore(&ch->lock, irq_flags);
531         return bytes_transferred;
532 }
533 EXPORT_SYMBOL(tegra_dma_get_transfer_count);
534
535 int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
536         struct tegra_dma_req *req)
537 {
538         unsigned long irq_flags;
539         struct tegra_dma_req *_req;
540         int start_dma = 0;
541         struct tegra_dma_req *hreq, *hnreq;
542
543         if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
544                 req->source_addr & 0x3 || req->dest_addr & 0x3) {
545                 pr_err("Invalid DMA request for channel %d\n", ch->id);
546                 return -EINVAL;
547         }
548
549         if ((req->size & 0x3) ||
550             ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
551              (req->size & 0x7))) {
552                 pr_err("Invalid DMA request size 0x%08x for channel %d\n",
553                                 req->size, ch->id);
554                 return -EINVAL;
555         }
556
557         spin_lock_irqsave(&ch->lock, irq_flags);
558
559         list_for_each_entry(_req, &ch->list, node) {
560                 if (req == _req) {
561                         spin_unlock_irqrestore(&ch->lock, irq_flags);
562                         return -EEXIST;
563                 }
564         }
565
566         req->bytes_transferred = 0;
567         req->status = TEGRA_DMA_REQ_PENDING;
568         /* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
569         req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
570         if (list_empty(&ch->list))
571                 start_dma = 1;
572
573         list_add_tail(&req->node, &ch->list);
574
575         if (start_dma) {
576                 tegra_dma_update_hw(ch, req);
577         } else {
578                 /*
579                  * Check to see if this request needs to be configured
580                  * immediately in continuous mode.
581                  */
582                 if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
583                         goto end;
584
585                 hreq = list_entry(ch->list.next, typeof(*hreq), node);
586                 hnreq = list_entry(hreq->node.next, typeof(*hnreq), node);
587                 if (hnreq != req)
588                         goto end;
589
590                 if ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
591                     (req->buffer_status != TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL))
592                         goto end;
593
594                 /* Need to configure the new request now */
595                 tegra_dma_update_hw_partial(ch, req);
596         }
597
598 end:
599         spin_unlock_irqrestore(&ch->lock, irq_flags);
600         return 0;
601 }
602 EXPORT_SYMBOL(tegra_dma_enqueue_req);
603
604 static void tegra_dma_dump_channel_usage(void)
605 {
606         int i;
607         pr_info("DMA channel allocation dump:\n");
608         for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
609                 struct tegra_dma_channel *ch = &dma_channels[i];
610                 pr_warn("dma %d used by %s\n", i, ch->client_name);
611         }
612         return;
613 }
614
615 struct tegra_dma_channel *tegra_dma_allocate_channel(int mode,
616                 const char namefmt[], ...)
617 {
618         int channel;
619         struct tegra_dma_channel *ch = NULL;
620         va_list args;
621         dma_isr_handler isr_handler = NULL;
622
623         if (WARN_ON(!tegra_dma_initialized))
624                 return NULL;
625
626         mutex_lock(&tegra_dma_lock);
627
628         /* first channel is the shared channel */
629         if (mode & TEGRA_DMA_SHARED) {
630                 channel = TEGRA_SYSTEM_DMA_CH_MIN;
631         } else {
632                 channel = find_first_zero_bit(channel_usage,
633                         ARRAY_SIZE(dma_channels));
634                 if (channel >= ARRAY_SIZE(dma_channels)) {
635                         tegra_dma_dump_channel_usage();
636                         goto out;
637                 }
638         }
639
640         if (mode & TEGRA_DMA_MODE_ONESHOT)
641                 isr_handler = handle_oneshot_dma;
642         else if (mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
643                 isr_handler = handle_continuous_dbl_dma;
644         else if (mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
645                 isr_handler = handle_continuous_sngl_dma;
646         else
647                 pr_err("Bad channel mode for DMA ISR handler\n");
648
649         if (!isr_handler)
650                 goto out;
651
652         __set_bit(channel, channel_usage);
653         ch = &dma_channels[channel];
654         ch->mode = mode;
655         ch->isr_handler = isr_handler;
656         va_start(args, namefmt);
657         vsnprintf(ch->client_name, sizeof(ch->client_name),
658                 namefmt, args);
659         va_end(args);
660
661 out:
662         mutex_unlock(&tegra_dma_lock);
663         return ch;
664 }
665 EXPORT_SYMBOL(tegra_dma_allocate_channel);
666
667 void tegra_dma_free_channel(struct tegra_dma_channel *ch)
668 {
669         if (ch->mode & TEGRA_DMA_SHARED)
670                 return;
671         tegra_dma_cancel(ch);
672         mutex_lock(&tegra_dma_lock);
673         __clear_bit(ch->id, channel_usage);
674         memset(ch->client_name, 0, sizeof(ch->client_name));
675         ch->isr_handler = NULL;
676         ch->callback = NULL;
677         ch->cb_req = NULL;
678         mutex_unlock(&tegra_dma_lock);
679 }
680 EXPORT_SYMBOL(tegra_dma_free_channel);
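/*
 * Illustrative client usage (a sketch only, not part of this driver). The
 * callback, the buffer addresses and the request-select line are
 * hypothetical; a real client uses its own device addresses and its own
 * TEGRA_DMA_REQ_SEL_* line.
 *
 *	static void xfer_done(struct tegra_dma_req *req)
 *	{
 *		pr_info("DMA done, %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	static struct tegra_dma_req req;
 *	struct tegra_dma_channel *ch;
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT, "my-client");
 *	if (!ch)
 *		return -ENODEV;
 *
 *	req.to_memory = 1;			(device to memory)
 *	req.source_addr = fifo_phys;		(hypothetical APB FIFO address)
 *	req.dest_addr = buf_phys;		(hypothetical memory address)
 *	req.source_bus_width = 32;
 *	req.dest_bus_width = 32;
 *	req.source_wrap = 4;			(wrap on the 4-byte FIFO register)
 *	req.dest_wrap = 0;			(linear memory, no wrap)
 *	req.size = 64;				(bytes, word aligned)
 *	req.req_sel = TEGRA_DMA_REQ_SEL_SPI;
 *	req.complete = xfer_done;
 *	tegra_dma_enqueue_req(ch, &req);
 *	...
 *	tegra_dma_free_channel(ch);
 */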
681
682 int tegra_dma_get_channel_id(struct tegra_dma_channel *ch)
683 {
684         return ch->id;
685 }
686 EXPORT_SYMBOL(tegra_dma_get_channel_id);
687
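/*
 * tegra_dma_update_hw_partial() reprograms only the AHB/APB pointers and the
 * word count of a channel that is already running, with the engine briefly
 * paused. It is used to chain the next request in continuous mode without a
 * full reprogram; tegra_dma_update_hw() below performs the complete setup.
 */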
688 static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
689         struct tegra_dma_req *req)
690 {
691         u32 apb_ptr;
692         u32 ahb_ptr;
693         u32 csr;
694         unsigned long status;
695         unsigned int req_transfer_count;
696         bool configure = false;
697
698         if (req->to_memory) {
699                 apb_ptr = req->source_addr;
700                 ahb_ptr = req->dest_addr;
701         } else {
702                 apb_ptr = req->dest_addr;
703                 ahb_ptr = req->source_addr;
704         }
705
706         /*
707          * The DMA controller reloads the new configuration for the next
708          * transfer after the last burst of the current transfer completes.
709          * If the EOC status is not set, the last burst has not completed
710          * yet and it is safe to reprogram the channel here.
711          * If the EOC status is already set, the interrupt handler must
712          * load the new configuration after aborting the current DMA.
713          */
714         pause_dma(false);
715         status  = readl(ch->addr + APB_DMA_CHAN_STA);
716
717         /*
718          * If an interrupt is pending, do nothing; the ISR will handle
719          * the programming of the new request.
720          */
721         if (status & STA_ISE_EOC) {
722                 pr_warn("%s(): "
723                         "Skipping new configuration as interrupt is pending\n",
724                                 __func__);
725                 goto exit_config;
726         }
727
728         /* Safe to program new configuration */
729         writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
730         writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
731
732         req_transfer_count = get_req_xfer_word_count(ch, req);
733         csr = readl(ch->addr + APB_DMA_CHAN_CSR);
734         csr &= ~CSR_WCOUNT_MASK;
735         csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;
736         writel(csr, ch->addr + APB_DMA_CHAN_CSR);
737         req->status = TEGRA_DMA_REQ_INFLIGHT;
738         configure = true;
739
740 exit_config:
741         resume_dma();
742         return configure;
743 }
744
745 static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
746         struct tegra_dma_req *req)
747 {
748         int ahb_addr_wrap;
749         int apb_addr_wrap;
750         int ahb_bus_width;
751         int apb_bus_width;
752         int index;
753         unsigned int req_transfer_count;
754
755         u32 ahb_seq;
756         u32 apb_seq;
757         u32 ahb_ptr;
758         u32 apb_ptr;
759         u32 csr;
760
761         csr = CSR_FLOW;
762         if (req->complete || req->threshold)
763                 csr |= CSR_IE_EOC;
764
765         ahb_seq = AHB_SEQ_INTR_ENB;
766
767         switch (req->req_sel) {
768         case TEGRA_DMA_REQ_SEL_SL2B1:
769         case TEGRA_DMA_REQ_SEL_SL2B2:
770         case TEGRA_DMA_REQ_SEL_SL2B3:
771         case TEGRA_DMA_REQ_SEL_SL2B4:
772 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
773         case TEGRA_DMA_REQ_SEL_SL2B5:
774         case TEGRA_DMA_REQ_SEL_SL2B6:
775         case TEGRA_DMA_REQ_SEL_APBIF_CH0:
776         case TEGRA_DMA_REQ_SEL_APBIF_CH1:
777         case TEGRA_DMA_REQ_SEL_APBIF_CH2:
778         case TEGRA_DMA_REQ_SEL_APBIF_CH3:
779 #endif
780         case TEGRA_DMA_REQ_SEL_SPI:
781                 /* The dtv interface has a fixed burst size of 4 words */
782                 if (req->fixed_burst_size) {
783                         ahb_seq |= AHB_SEQ_BURST_4;
784                         break;
785                 }
786                 /* For spi/slink the burst size depends on the transfer
787                  * size: a multiple of 32 bytes uses a burst of 8 words
788                  * (8x32 bits), a multiple of 16 bytes uses a burst of
789                  * 4 words (4x32 bits), and anything else uses a burst
790                  * of 1 word (1x32 bits). */
791                 if (req->size & 0xF)
792                         ahb_seq |= AHB_SEQ_BURST_1;
793                 else if ((req->size >> 4) & 0x1)
794                         ahb_seq |= AHB_SEQ_BURST_4;
795                 else
796                         ahb_seq |= AHB_SEQ_BURST_8;
797                 break;
798 #if defined(CONFIG_ARCH_TEGRA_2x_SOC)
799         case TEGRA_DMA_REQ_SEL_I2S_2:
800         case TEGRA_DMA_REQ_SEL_I2S_1:
801         case TEGRA_DMA_REQ_SEL_SPD_I:
802         case TEGRA_DMA_REQ_SEL_UI_I:
803         case TEGRA_DMA_REQ_SEL_I2S2_2:
804         case TEGRA_DMA_REQ_SEL_I2S2_1:
805                 /* On Tegra2 SoCs the i2s/spdif burst size is 4 words */
806                 ahb_seq |= AHB_SEQ_BURST_4;
807                 break;
808 #endif
809
810         default:
811                 ahb_seq |= AHB_SEQ_BURST_1;
812                 break;
813         }
814
815         apb_seq = 0;
816
817         csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
818
819         req_transfer_count = get_req_xfer_word_count(ch, req);
820
821         /* One shot mode is always single buffered.  Continuous mode could
822          * support either.
823          */
824         if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
825                 csr |= CSR_ONCE;
826
827         if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
828                 ahb_seq |= AHB_SEQ_DBL_BUF;
829
830         csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;
831
832         if (req->to_memory) {
833                 apb_ptr = req->source_addr;
834                 ahb_ptr = req->dest_addr;
835
836                 apb_addr_wrap = req->source_wrap;
837                 ahb_addr_wrap = req->dest_wrap;
838                 apb_bus_width = req->source_bus_width;
839                 ahb_bus_width = req->dest_bus_width;
840
841         } else {
842                 csr |= CSR_DIR;
843                 apb_ptr = req->dest_addr;
844                 ahb_ptr = req->source_addr;
845
846                 apb_addr_wrap = req->dest_wrap;
847                 ahb_addr_wrap = req->source_wrap;
848                 apb_bus_width = req->dest_bus_width;
849                 ahb_bus_width = req->source_bus_width;
850         }
851
852         apb_addr_wrap >>= 2;
853         ahb_addr_wrap >>= 2;
854
855         /* set address wrap for APB size */
856         index = 0;
857         do  {
858                 if (apb_addr_wrap_table[index] == apb_addr_wrap)
859                         break;
860                 index++;
861         } while (index < ARRAY_SIZE(apb_addr_wrap_table));
862         BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
863         apb_seq |= index << APB_SEQ_WRAP_SHIFT;
864
865         /* set address wrap for AHB size */
866         index = 0;
867         do  {
868                 if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
869                         break;
870                 index++;
871         } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
872         BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
873         ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
874
875         for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
876                 if (bus_width_table[index] == ahb_bus_width)
877                         break;
878         }
879         BUG_ON(index == ARRAY_SIZE(bus_width_table));
880         ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
881
882         for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
883                 if (bus_width_table[index] == apb_bus_width)
884                         break;
885         }
886         BUG_ON(index == ARRAY_SIZE(bus_width_table));
887         apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
888
889         writel(csr, ch->addr + APB_DMA_CHAN_CSR);
890         writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
891         writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
892         writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
893         writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
894
895         csr |= CSR_ENB;
896         writel(csr, ch->addr + APB_DMA_CHAN_CSR);
897
898         req->status = TEGRA_DMA_REQ_INFLIGHT;
899 }
900
901 static void handle_oneshot_dma(struct tegra_dma_channel *ch)
902 {
903         struct tegra_dma_req *req;
904
905         req = list_entry(ch->list.next, typeof(*req), node);
906         list_del(&req->node);
907         req->bytes_transferred += req->size;
908         req->status = TEGRA_DMA_REQ_SUCCESS;
909
910         ch->callback = req->complete;
911         ch->cb_req = req;
912
913         start_head_req(ch);
914         return;
915 }
916
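/*
 * In continuous double-buffer mode the channel interrupts at each half-buffer
 * boundary. The handler below uses req->buffer_status to track which half has
 * just completed and cross-checks it against the STA_PING_PONG bit to detect
 * a request queue that has fallen out of sync.
 */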
917 static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
918 {
919         struct tegra_dma_req *req;
920
921         req = list_entry(ch->list.next, typeof(*req), node);
922
923         if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
924                 bool is_dma_ping_complete;
925                 unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
926                 is_dma_ping_complete = (status & STA_PING_PONG) ? true : false;
927
928                 /* The ping-pong status reads inverted for memory-write transfers */
929                 if (req->to_memory)
930                         is_dma_ping_complete = !is_dma_ping_complete;
931
932                 /* Out of sync - Release current buffer */
933                 if (!is_dma_ping_complete) {
934                         /*
935                          * We should not get here if the queueing and the
936                          * system latency are properly configured.
937                          */
938                         req->bytes_transferred += req->size;
939
940                         list_del(&req->node);
941                         ch->callback = req->complete;
942                         ch->cb_req = req;
943
944                         tegra_dma_abort_req(ch, req,
945                                 "Dma becomes out of sync for ping-pong buffer");
946                         return;
947                 }
948
949                 /*
950                  * Configure the next request so that, after the full
951                  * buffer completes, it can start without SW intervention.
952                  */
953                 configure_next_req(ch, req);
954
955                 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
956                 req->status = TEGRA_DMA_REQ_SUCCESS;
957                 req->bytes_transferred += req->size >> 1;
958
959                 ch->callback = req->threshold;
960                 ch->cb_req = req;
961                 return;
962         }
963
964         if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
965                 /* Interrupt for full buffer complete */
966                 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
967                 req->bytes_transferred += req->size >> 1;
968                 req->status = TEGRA_DMA_REQ_SUCCESS;
969
970                 list_del(&req->node);
971                 ch->callback = req->complete;
972                 ch->cb_req = req;
973
974                 handle_continuous_head_request(ch, req);
975                 return;
976         }
977         tegra_dma_abort_req(ch, req, "DMA status is out of sync\n");
978         /* The DMA should have been stopped much earlier */
979         BUG();
980         return;
981 }
982
983 static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
984 {
985         struct tegra_dma_req *req;
986
987         req = list_entry(ch->list.next, typeof(*req), node);
988         if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL) {
989                 tegra_dma_stop(ch);
990                 pr_err("%s: DMA complete irq without corresponding req\n",
991                                 __func__);
992                 WARN_ON(1);
993                 return;
994         }
995
996         /* Handle the case when buffer is completely full */
997         req->bytes_transferred += req->size;
998         req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
999         req->status = TEGRA_DMA_REQ_SUCCESS;
1000
1001         list_del(&req->node);
1002         ch->callback = req->complete;
1003         ch->cb_req = req;
1004
1005         handle_continuous_head_request(ch, req);
1006         return;
1007 }
1008
1009 static void handle_dma_isr_locked(struct tegra_dma_channel *ch)
1010 {
1011         /* There must be a valid ISR handler at this point */
1012         BUG_ON(!ch->isr_handler);
1013
1014         if (list_empty(&ch->list)) {
1015                 tegra_dma_stop(ch);
1016                 pr_err("%s: No requests in the list.\n", __func__);
1017                 WARN_ON(1);
1018                 return;
1019         }
1020
1021         ch->isr_handler(ch);
1022 }
1023
1024 static irqreturn_t dma_isr(int irq, void *data)
1025 {
1026         struct tegra_dma_channel *ch = data;
1027         unsigned long irq_flags;
1028         unsigned long status;
1029         dma_callback callback = NULL;
1030         struct tegra_dma_req *cb_req = NULL;
1031
1032         spin_lock_irqsave(&ch->lock, irq_flags);
1033
1034         /*
1035          * Callbacks should be set and cleared while holding the spinlock,
1036          * never left set.
1037          */
1038         if (ch->callback || ch->cb_req)
1039                 pr_err("%s():"
1040                         "Channel %d callbacks are not initialized properly\n",
1041                         __func__, ch->id);
1042         BUG_ON(ch->callback || ch->cb_req);
1043
1044         status = readl(ch->addr + APB_DMA_CHAN_STA);
1045         if (status & STA_ISE_EOC) {
1046                 /* Clear dma int status */
1047                 writel(status, ch->addr + APB_DMA_CHAN_STA);
1048                 handle_dma_isr_locked(ch);
1049                 callback = ch->callback;
1050                 cb_req = ch->cb_req;
1051                 ch->callback = NULL;
1052                 ch->cb_req = NULL;
1053         } else {
1054                 pr_info("Interrupt already handled for channel %d\n", ch->id);
1055         }
1056         spin_unlock_irqrestore(&ch->lock, irq_flags);
1057
1058         /* Call the callback to notify the client, if one is set */
1059         if (callback)
1060                 callback(cb_req);
1061         return IRQ_HANDLED;
1062 }
1063
1064 int __init tegra_dma_init(void)
1065 {
1066         int ret = 0;
1067         int i;
1068         unsigned int irq;
1069
1070         bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
1071
1072         dma_clk = clk_get_sys("tegra-dma", NULL);
1073         if (IS_ERR_OR_NULL(dma_clk)) {
1074                 pr_err("Unable to get clock for APB DMA\n");
1075                 ret = dma_clk ? PTR_ERR(dma_clk) : -ENOENT;
1076                 goto fail;
1077         }
1078         ret = clk_enable(dma_clk);
1079         if (ret != 0) {
1080                 pr_err("Unable to enable clock for APB DMA\n");
1081                 goto fail;
1082         }
1083
1084         /*
1085          * Reset all DMA channels to make sure every channel starts from
1086          * its initial state.
1087          */
1088         tegra_periph_reset_assert(dma_clk);
1089         udelay(10);
1090         tegra_periph_reset_deassert(dma_clk);
1091         udelay(10);
1092
1093         writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
1094         writel(0, general_dma_addr + APB_DMA_CNTRL);
1095         writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
1096                         general_dma_addr + APB_DMA_IRQ_MASK_SET);
1097
1098         for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
1099                 struct tegra_dma_channel *ch = &dma_channels[i];
1100
1101                 ch->id = i;
1102                 ch->isr_handler = NULL;
1103                 snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
1104
1105                 memset(ch->client_name, 0, sizeof(ch->client_name));
1106
1107                 ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
1108                         TEGRA_APB_DMA_CH0_SIZE * i);
1109
1110                 spin_lock_init(&ch->lock);
1111                 INIT_LIST_HEAD(&ch->list);
1112
1113 #ifndef CONFIG_ARCH_TEGRA_2x_SOC
1114                 if (i >= 16)
1115                         irq = INT_APB_DMA_CH16 + i - 16;
1116                 else
1117 #endif
1118                         irq = INT_APB_DMA_CH0 + i;
1119                 ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
1120                 if (ret) {
1121                         pr_err("Failed to register IRQ %d for DMA %d\n",
1122                                 irq, i);
1123                         goto fail;
1124                 }
1125                 ch->irq = irq;
1126
1127                 __clear_bit(i, channel_usage);
1128         }
1129         /* mark the shared channel allocated */
1130         __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
1131
1132         tegra_dma_initialized = true;
1133
1134         return 0;
1135 fail:
1136         writel(0, general_dma_addr + APB_DMA_GEN);
1137         for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
1138                 struct tegra_dma_channel *ch = &dma_channels[i];
1139                 if (ch->irq)
1140                         free_irq(ch->irq, ch);
1141         }
1142         return ret;
1143 }
1144 postcore_initcall(tegra_dma_init);
1145
1146 #ifdef CONFIG_PM_SLEEP
1147
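/* Suspend context: 3 global registers plus 5 registers per channel. */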
1148 static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
1149
1150 static int tegra_dma_suspend(void)
1151 {
1152         u32 *ctx = apb_dma;
1153         int i;
1154
1155         *ctx++ = readl(general_dma_addr + APB_DMA_GEN);
1156         *ctx++ = readl(general_dma_addr + APB_DMA_CNTRL);
1157         *ctx++ = readl(general_dma_addr + APB_DMA_IRQ_MASK);
1158
1159         for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
1160                 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
1161                                   TEGRA_APB_DMA_CH0_SIZE * i);
1162
1163                 *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
1164                 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
1165                 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
1166                 *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
1167                 *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
1168         }
1169
1170         /* Disable the DMA clock. */
1171         clk_disable(dma_clk);
1172         return 0;
1173 }
1174
1175 static void tegra_dma_resume(void)
1176 {
1177         u32 *ctx = apb_dma;
1178         int i;
1179
1180         /* Enable the DMA clock. */
1181         clk_enable(dma_clk);
1182
1183         writel(*ctx++, general_dma_addr + APB_DMA_GEN);
1184         writel(*ctx++, general_dma_addr + APB_DMA_CNTRL);
1185         writel(*ctx++, general_dma_addr + APB_DMA_IRQ_MASK);
1186
1187         for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
1188                 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
1189                                   TEGRA_APB_DMA_CH0_SIZE * i);
1190
1191                 writel(*ctx++, addr + APB_DMA_CHAN_CSR);
1192                 writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
1193                 writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
1194                 writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
1195                 writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
1196         }
1197 }
1198
1199 static struct syscore_ops tegra_dma_syscore_ops = {
1200         .suspend = tegra_dma_suspend,
1201         .resume = tegra_dma_resume,
1202 };
1203
1204 static int tegra_dma_syscore_init(void)
1205 {
1206         register_syscore_ops(&tegra_dma_syscore_ops);
1207
1208         return 0;
1209 }
1210 subsys_initcall(tegra_dma_syscore_init);
1211 #endif
1212
1213 #ifdef CONFIG_DEBUG_FS
1214
1215 #include <linux/debugfs.h>
1216 #include <linux/seq_file.h>
1217
1218 static int dbg_dma_show(struct seq_file *s, void *unused)
1219 {
1220         int i;
1221
1222         seq_printf(s, "    APBDMA global register\n");
1223         seq_printf(s, "DMA_GEN:   0x%08x\n",
1224                         __raw_readl(general_dma_addr + APB_DMA_GEN));
1225         seq_printf(s, "DMA_CNTRL: 0x%08x\n",
1226                         __raw_readl(general_dma_addr + APB_DMA_CNTRL));
1227         seq_printf(s, "IRQ_MASK:  0x%08x\n",
1228                         __raw_readl(general_dma_addr + APB_DMA_IRQ_MASK));
1229
1230         for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
1231                 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
1232                                   TEGRA_APB_DMA_CH0_SIZE * i);
1233
1234                 seq_printf(s, "    APBDMA channel %02d register\n", i);
1235                 seq_printf(s, "0x00: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1236                                         __raw_readl(addr + 0x0),
1237                                         __raw_readl(addr + 0x4),
1238                                         __raw_readl(addr + 0x8),
1239                                         __raw_readl(addr + 0xC));
1240                 seq_printf(s, "0x10: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1241                                         __raw_readl(addr + 0x10),
1242                                         __raw_readl(addr + 0x14),
1243                                         __raw_readl(addr + 0x18),
1244                                         __raw_readl(addr + 0x1C));
1245         }
1246         seq_printf(s, "\nAPB DMA users\n");
1247         seq_printf(s, "-------------\n");
1248         for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
1249                 struct tegra_dma_channel *ch = &dma_channels[i];
1250                 if (strlen(ch->client_name) > 0)
1251                         seq_printf(s, "dma %d -> %s\n", i, ch->client_name);
1252         }
1253         return 0;
1254 }
1255
1256 static int dbg_dma_open(struct inode *inode, struct file *file)
1257 {
1258         return single_open(file, dbg_dma_show, &inode->i_private);
1259 }
1260
1261 static const struct file_operations debug_fops = {
1262         .open      = dbg_dma_open,
1263         .read      = seq_read,
1264         .llseek  = seq_lseek,
1265         .release        = single_release,
1266 };
1267
1268 static int __init tegra_dma_debuginit(void)
1269 {
1270         (void) debugfs_create_file("tegra_dma", S_IRUGO,
1271                                         NULL, NULL, &debug_fops);
1272         return 0;
1273 }
1274 late_initcall(tegra_dma_debuginit);
1275 #endif