dmaengine: Driver for Topcliff PCH DMA controller
drivers/dma/pch_dma.c
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE                0x0
#define DMA_CTL0_SG                     0x1
#define DMA_CTL0_ONESHOT                0x2
#define DMA_CTL0_MODE_MASK_BITS         0x3
#define DMA_CTL0_DIR_SHIFT_BITS         2
#define DMA_CTL0_BITS_PER_CH            4

#define DMA_CTL2_START_SHIFT_BITS       8
#define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
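
/*
 * CTL2 layout as used by this driver: bits 0..7 are the per-channel
 * interrupt enables (DMA_CTL2_IRQ_ENABLE_MASK), and bit (8 + chan_id)
 * triggers a transfer start for that channel; see pdc_enable_irq() and
 * pdc_dostart() below.
 */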

#define DMA_STATUS_IDLE                 0x0
#define DMA_STATUS_DESC_READ            0x1
#define DMA_STATUS_WAIT                 0x2
#define DMA_STATUS_ACCESS               0x3
#define DMA_STATUS_BITS_PER_CH          2
#define DMA_STATUS_MASK_BITS            0x3
#define DMA_STATUS_SHIFT_BITS           16
#define DMA_STATUS_IRQ(x)               (0x1 << (x))
#define DMA_STATUS_ERR(x)               (0x1 << ((x) + 8))

#define DMA_DESC_WIDTH_SHIFT_BITS       12
#define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
#define DMA_DESC_END_WITHOUT_IRQ        0x0
#define DMA_DESC_END_WITH_IRQ           0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
#define DMA_DESC_FOLLOW_WITH_IRQ        0x3
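
/*
 * The low two bits of a descriptor's "next" word select what happens when
 * the descriptor completes (end of chain vs. follow the chain, with or
 * without raising an interrupt); the remaining bits hold the physical
 * address of the next descriptor, OR'ed in by pd_prep_slave_sg().
 */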

#define MAX_CHAN_NR                     8

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
        u32     dev_addr;
        u32     mem_addr;
        u32     size;
        u32     next;
};
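
/*
 * This layout is shared by the in-memory descriptors and the per-channel
 * register block: see the desc[] member of struct pch_dma_regs and the
 * PDC_* offsets used by channel_readl()/channel_writel() below.
 */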

struct pch_dma_regs {
        u32     dma_ctl0;
        u32     dma_ctl1;
        u32     dma_ctl2;
        u32     reserved1;
        u32     dma_sts0;
        u32     dma_sts1;
        u32     reserved2;
        u32     reserved3;
        struct pch_dma_desc_regs desc[0];
};

struct pch_dma_desc {
        struct pch_dma_desc_regs regs;
        struct dma_async_tx_descriptor txd;
        struct list_head        desc_node;
        struct list_head        tx_list;
};

struct pch_dma_chan {
        struct dma_chan         chan;
        void __iomem *membase;
        enum dma_data_direction dir;
        struct tasklet_struct   tasklet;
        unsigned long           err_status;

        spinlock_t              lock;

        dma_cookie_t            completed_cookie;
        struct list_head        active_list;
        struct list_head        queue;
        struct list_head        free_list;
        unsigned int            descs_allocated;
};
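
/*
 * pch_dma_chan.lock protects the three descriptor lists, descs_allocated
 * and the cookie fields.  It is taken with spin_lock_bh() everywhere
 * because completion handling runs from a tasklet (softirq context).
 */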

#define PDC_DEV_ADDR    0x00
#define PDC_MEM_ADDR    0x04
#define PDC_SIZE        0x08
#define PDC_NEXT        0x0C

#define channel_readl(pdc, name) \
        readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
        writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
        struct dma_device       dma;
        void __iomem *membase;
        struct pci_pool         *pool;
        struct pch_dma_regs     regs;
        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
        struct pch_dma_chan     channels[0];
};

#define PCH_DMA_CTL0    0x00
#define PCH_DMA_CTL1    0x04
#define PCH_DMA_CTL2    0x08
#define PCH_DMA_STS0    0x10
#define PCH_DMA_STS1    0x14

#define dma_readl(pd, name) \
        __raw_readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
        __raw_writel((val), (pd)->membase + PCH_DMA_##name)

static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
        return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->active_list,
                                struct pch_dma_desc, desc_node);
}

static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->queue,
                                struct pch_dma_desc, desc_node);
}

static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL2);

        if (enable)
                val |= 0x1 << chan->chan_id;
        else
                val &= ~(0x1 << chan->chan_id);

        dma_writel(pd, CTL2, val);

        dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_dir(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL0);

        if (pd_chan->dir == DMA_TO_DEVICE)
                val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                               DMA_CTL0_DIR_SHIFT_BITS);
        else
                val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                 DMA_CTL0_DIR_SHIFT_BITS));

        dma_writel(pd, CTL0, val);

        dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL0);

        val &= ~(DMA_CTL0_MODE_MASK_BITS <<
                (DMA_CTL0_BITS_PER_CH * chan->chan_id));
        val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

        dma_writel(pd, CTL0, val);

        dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
                chan->chan_id, val);
}

static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        val = dma_readl(pd, STS0);
        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
                        DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
        return pdc_get_status(pd_chan) == DMA_STATUS_IDLE;
}

static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: Attempt to start non-idle channel\n");
                return;
        }

        channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
        channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
        channel_writel(pd_chan, SIZE, desc->regs.size);
        channel_writel(pd_chan, NEXT, desc->regs.next);

        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.dev_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.mem_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
                pd_chan->chan.chan_id, desc->regs.size);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
                pd_chan->chan.chan_id, desc->regs.next);

        /* A single descriptor (empty tx_list) runs in one-shot mode; a
         * chained transfer prepared by pd_prep_slave_sg() runs in
         * scatter-gather mode so the hardware follows the "next" links. */
        if (list_empty(&desc->tx_list))
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
        else
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);

        val = dma_readl(pd, CTL2);
        val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
        dma_writel(pd, CTL2, val);
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
                               struct pch_dma_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        list_splice_init(&desc->tx_list, &pd_chan->free_list);
        list_move(&desc->desc_node, &pd_chan->free_list);

        /* Record the completed cookie so pd_tx_status() can report
         * completion; all callers hold pd_chan->lock. */
        pd_chan->completed_cookie = txd->cookie;

        if (callback)
                callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        BUG_ON(!pdc_is_idle(pd_chan));

        if (!list_empty(&pd_chan->queue))
                pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &pd_chan->active_list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *bad_desc;

        bad_desc = pdc_first_active(pd_chan);
        list_del(&bad_desc->desc_node);

        /* Append anything still queued to the tail of the active list and
         * restart the channel before reporting and recycling the failed
         * descriptor. */
        list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

        if (!list_empty(&pd_chan->active_list))
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));

        dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
        dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
                 bad_desc->txd.cookie);

        pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
        if (list_empty(&pd_chan->active_list) ||
            list_is_singular(&pd_chan->active_list)) {
                pdc_complete_all(pd_chan);
        } else {
                pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));
        }
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
                                      struct pch_dma_desc *desc)
{
        dma_cookie_t cookie = pd_chan->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        pd_chan->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct pch_dma_desc *desc = to_pd_desc(txd);
        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&pd_chan->lock);
        cookie = pdc_assign_cookie(pd_chan, desc);

        if (list_empty(&pd_chan->active_list)) {
                list_add_tail(&desc->desc_node, &pd_chan->active_list);
                pdc_dostart(pd_chan, desc);
        } else {
                list_add_tail(&desc->desc_node, &pd_chan->queue);
        }

        spin_unlock_bh(&pd_chan->lock);

        /* Return the assigned cookie so clients can track completion. */
        return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
        struct pch_dma_desc *desc = NULL;
        struct pch_dma *pd = to_pd(chan->device);
        dma_addr_t addr;

        /* Honor the caller's gfp flags (pdc_desc_get() passes GFP_NOIO). */
        desc = pci_pool_alloc(pd->pool, flags, &addr);
        if (desc) {
                memset(desc, 0, sizeof(struct pch_dma_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = pd_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = addr;
        }

        return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        struct pch_dma_desc *ret = NULL;
        int i = 0;

        spin_lock_bh(&pd_chan->lock);
        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
        }
        spin_unlock_bh(&pd_chan->lock);
        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

        if (!ret) {
                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
                if (ret) {
                        spin_lock_bh(&pd_chan->lock);
                        pd_chan->descs_allocated++;
                        spin_unlock_bh(&pd_chan->lock);
                } else {
                        dev_err(chan2dev(&pd_chan->chan),
                                "failed to alloc desc\n");
                }
        }

        return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
                         struct pch_dma_desc *desc)
{
        if (desc) {
                spin_lock_bh(&pd_chan->lock);
                list_splice_init(&desc->tx_list, &pd_chan->free_list);
                list_add(&desc->desc_node, &pd_chan->free_list);
                spin_unlock_bh(&pd_chan->lock);
        }
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc;
        LIST_HEAD(tmp_list);
        int i;

        if (!pdc_is_idle(pd_chan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
                return -EIO;
        }

        if (!list_empty(&pd_chan->free_list))
                return pd_chan->descs_allocated;

        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = pdc_alloc_desc(chan, GFP_KERNEL);

                if (!desc) {
                        dev_warn(chan2dev(chan),
                                "Only allocated %d initial descriptors\n", i);
                        break;
                }

                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_bh(&pd_chan->lock);
        list_splice(&tmp_list, &pd_chan->free_list);
        pd_chan->descs_allocated = i;
        pd_chan->completed_cookie = chan->cookie = 1;
        spin_unlock_bh(&pd_chan->lock);

        pdc_enable_irq(chan, 1);
        pdc_set_dir(chan);

        return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(tmp_list);

        BUG_ON(!pdc_is_idle(pd_chan));
        BUG_ON(!list_empty(&pd_chan->active_list));
        BUG_ON(!list_empty(&pd_chan->queue));

        spin_lock_bh(&pd_chan->lock);
        list_splice_init(&pd_chan->free_list, &tmp_list);
        pd_chan->descs_allocated = 0;
        spin_unlock_bh(&pd_chan->lock);

        list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
                pci_pool_free(pd->pool, desc, desc->txd.phys);

        pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_completed;
        int ret;

        spin_lock_bh(&pd_chan->lock);
        last_completed = pd_chan->completed_cookie;
        last_used = chan->cookie;
        spin_unlock_bh(&pd_chan->lock);

        ret = dma_async_is_complete(cookie, last_completed, last_used);

        dma_set_tx_state(txstate, last_completed, last_used, 0);

        return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);

        if (pdc_is_idle(pd_chan)) {
                spin_lock_bh(&pd_chan->lock);
                pdc_advance_work(pd_chan);
                spin_unlock_bh(&pd_chan->lock);
        }
}

static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_data_direction direction, unsigned long flags)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_slave *pd_slave = chan->private;
        struct pch_dma_desc *first = NULL;
        struct pch_dma_desc *prev = NULL;
        struct pch_dma_desc *desc = NULL;
        struct scatterlist *sg;
        dma_addr_t reg;
        int i;

        if (unlikely(!sg_len)) {
                dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
                return NULL;
        }

        if (direction == DMA_FROM_DEVICE)
                reg = pd_slave->rx_reg;
        else if (direction == DMA_TO_DEVICE)
                reg = pd_slave->tx_reg;
        else
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                desc = pdc_desc_get(pd_chan);

                if (!desc)
                        goto err_desc_get;

                desc->regs.dev_addr = reg;
                desc->regs.mem_addr = sg_phys(sg);
                desc->regs.size = sg_dma_len(sg);
                desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

                switch (pd_slave->width) {
                case PCH_DMA_WIDTH_1_BYTE:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
                        break;
                case PCH_DMA_WIDTH_2_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
                        break;
                case PCH_DMA_WIDTH_4_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
                        break;
                default:
                        goto err_desc_get;
                }

                if (!first) {
                        first = desc;
                } else {
                        prev->regs.next |= desc->txd.phys;
                        list_add_tail(&desc->desc_node, &first->tx_list);
                }

                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                desc->regs.next = DMA_DESC_END_WITH_IRQ;
        else
                desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

        first->txd.cookie = -EBUSY;
        desc->txd.flags = flags;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
        pdc_desc_put(pd_chan, first);
        return NULL;
}
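
/*
 * A rough sketch (not part of this driver) of how a client might use these
 * channels, assuming a struct pch_dma_slave with the tx_reg/rx_reg and
 * width fields referenced above.  The filter callback, FIFO address, dev,
 * buf, len and callback names are placeholders.  Note that probe() below
 * assigns even-numbered channels DMA_TO_DEVICE and odd ones DMA_FROM_DEVICE,
 * so a real filter would normally also check chan->chan_id.
 *
 *      static bool my_filter(struct dma_chan *chan, void *param)
 *      {
 *              chan->private = param;  // let pd_prep_slave_sg() see it
 *              return true;
 *      }
 *
 *      struct pch_dma_slave pd_slave = {
 *              .tx_reg = fifo_phys_addr,       // device FIFO bus address
 *              .width  = PCH_DMA_WIDTH_1_BYTE,
 *      };
 *      dma_cap_mask_t mask;
 *      struct dma_chan *chan;
 *      struct dma_async_tx_descriptor *txd;
 *      struct scatterlist sg;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, my_filter, &pd_slave);
 *
 *      sg_init_one(&sg, buf, len);
 *      dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *      txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *                                               DMA_TO_DEVICE,
 *                                               DMA_PREP_INTERRUPT);
 *      txd->callback = my_callback;
 *      txd->callback_param = my_data;
 *      txd->tx_submit(txd);
 *      dma_async_issue_pending(chan);
 */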

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                             unsigned long arg)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_bh(&pd_chan->lock);

        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);

        spin_unlock_bh(&pd_chan->lock);

        return 0;
}

static void pdc_tasklet(unsigned long data)
{
        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: handle non-idle channel in tasklet\n");
                return;
        }

        spin_lock_bh(&pd_chan->lock);
        if (test_and_clear_bit(0, &pd_chan->err_status))
                pdc_handle_error(pd_chan);
        else
                pdc_advance_work(pd_chan);
        spin_unlock_bh(&pd_chan->lock);
}

static irqreturn_t pd_irq(int irq, void *devid)
{
        struct pch_dma *pd = (struct pch_dma *)devid;
        struct pch_dma_chan *pd_chan;
        u32 sts0;
        int i;
        int ret = IRQ_NONE;

        sts0 = dma_readl(pd, STS0);

        dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

        for (i = 0; i < pd->dma.chancnt; i++) {
                pd_chan = &pd->channels[i];

                if (sts0 & DMA_STATUS_IRQ(i)) {
                        if (sts0 & DMA_STATUS_ERR(i))
                                set_bit(0, &pd_chan->err_status);

                        tasklet_schedule(&pd_chan->tasklet);
                        ret = IRQ_HANDLED;
                }
        }

        /* clear interrupt bits in status register */
        dma_writel(pd, STS0, sts0);

        return ret;
}

static void pch_dma_save_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
        pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
        pd->regs.dma_ctl2 = dma_readl(pd, CTL2);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
                pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
                pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
                pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

                i++;
        }
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        dma_writel(pd, CTL0, pd->regs.dma_ctl0);
        dma_writel(pd, CTL1, pd->regs.dma_ctl1);
        dma_writel(pd, CTL2, pd->regs.dma_ctl2);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
                channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
                channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
                channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

                i++;
        }
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);

        if (pd)
                pch_dma_save_regs(pd);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        err = pci_enable_device(pdev);
        if (err) {
                dev_dbg(&pdev->dev, "failed to enable device\n");
                return err;
        }

        if (pd)
                pch_dma_restore_regs(pd);

        return 0;
}

static int __devinit pch_dma_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        struct pch_dma *pd;
        struct pch_dma_regs *regs;
        unsigned int nr_channels;
        int err;
        int i;

        nr_channels = id->driver_data;
        pd = kzalloc(sizeof(struct pch_dma) +
                sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pci_set_drvdata(pdev, pd);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device\n");
                goto err_free_mem;
        }

        if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Cannot find proper base address\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "Cannot set proper DMA config\n");
                goto err_free_res;
        }

        regs = pd->membase = pci_iomap(pdev, 1, 0);
        if (!pd->membase) {
                dev_err(&pdev->dev, "Cannot map MMIO registers\n");
                err = -ENOMEM;
                goto err_free_res;
        }

        pci_set_master(pdev);

        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
        if (err) {
                dev_err(&pdev->dev, "Failed to request IRQ\n");
                goto err_iounmap;
        }

        pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
                                   sizeof(struct pch_dma_desc), 4, 0);
        if (!pd->pool) {
                dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
                err = -ENOMEM;
                goto err_free_irq;
        }

        pd->dma.dev = &pdev->dev;
        pd->dma.chancnt = nr_channels;

        INIT_LIST_HEAD(&pd->dma.channels);

        for (i = 0; i < nr_channels; i++) {
                struct pch_dma_chan *pd_chan = &pd->channels[i];

                pd_chan->chan.device = &pd->dma;
                pd_chan->chan.cookie = 1;
                pd_chan->chan.chan_id = i;

                pd_chan->membase = &regs->desc[i];

                pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

                spin_lock_init(&pd_chan->lock);

                INIT_LIST_HEAD(&pd_chan->active_list);
                INIT_LIST_HEAD(&pd_chan->queue);
                INIT_LIST_HEAD(&pd_chan->free_list);

                tasklet_init(&pd_chan->tasklet, pdc_tasklet,
                             (unsigned long)pd_chan);
                list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
        }

        dma_cap_zero(pd->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

        pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
        pd->dma.device_free_chan_resources = pd_free_chan_resources;
        pd->dma.device_tx_status = pd_tx_status;
        pd->dma.device_issue_pending = pd_issue_pending;
        pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
        pd->dma.device_control = pd_device_control;

        err = dma_async_device_register(&pd->dma);
        if (err) {
                dev_err(&pdev->dev, "Failed to register DMA device\n");
                goto err_free_pool;
        }

        return 0;

err_free_pool:
        pci_pool_destroy(pd->pool);
err_free_irq:
        free_irq(pdev->irq, pd);
err_iounmap:
        pci_iounmap(pdev, pd->membase);
err_free_res:
        pci_release_regions(pdev);
err_disable_pdev:
        pci_disable_device(pdev);
err_free_mem:
        kfree(pd);
        return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;

        if (pd) {
                dma_async_device_unregister(&pd->dma);

                list_for_each_entry_safe(chan, _c, &pd->dma.channels,
                                         device_node) {
                        pd_chan = to_pd_chan(chan);

                        tasklet_disable(&pd_chan->tasklet);
                        tasklet_kill(&pd_chan->tasklet);
                }

                pci_pool_destroy(pd->pool);
                free_irq(pdev->irq, pd);
                pci_iounmap(pdev, pd->membase);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                kfree(pd);
        }
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_PCH_DMA_8CH        0x8810
#define PCI_DEVICE_ID_PCH_DMA_4CH        0x8815

static const struct pci_device_id pch_dma_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
        { 0, },         /* terminating entry required by the PCI core */
};
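
/*
 * Not in the original listing: exporting the ID table lets udev autoload
 * the module when a matching device is enumerated; harmless otherwise.
 */
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);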

static struct pci_driver pch_dma_driver = {
        .name           = DRV_NAME,
        .id_table       = pch_dma_id_table,
        .probe          = pch_dma_probe,
        .remove         = __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
        .suspend        = pch_dma_suspend,
        .resume         = pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
        return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
        pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");