/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>

#include "dmaengine.h"

#define NR_DEFAULT_DESC	16
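/*
 * A descriptor cycles through the states below: FREE while sitting in
 * the DMAC pool, PREP once plucked by a prep_xxx call, BUSY after it
 * has been submitted to the PL330 core, DONE when the core reports
 * completion, and back to FREE when the completion tasklet returns it
 * to the pool.
 */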
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/* Allocated to some channel during prep_xxx.
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/* Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/* Sitting on the channel work_list but xfer done
	 * by the PL330 core.
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC.
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* Peripheral fifo width and address, used by cyclic xfers */
	int burst_sz;
	dma_addr_t fifo_addr;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}
static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = NULL;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		desc->status = FREE;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
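/*
 * Feed the PL330 core from the channel work_list: submit descriptors
 * in order until the core's request queue is full (at most two
 * requests per channel thread can be outstanding, hence -EAGAIN).
 */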
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
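/*
 * Completion tasklet: collect DONE descriptors off the work_list,
 * complete their cookies, top up the core's queue via fill_queue(),
 * restart the channel thread, then run the client callbacks and
 * recycle the descriptors outside the channel lock.
 */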
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}
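/*
 * Callback invoked by the PL330 core when a request finishes. It runs
 * in interrupt context, so only mark the descriptor DONE here and
 * defer everything else to the channel tasklet.
 */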
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);
	desc->status = DONE;
	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
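/*
 * Bind a free hardware channel thread of the DMAC to this dmaengine
 * channel and set up its completion tasklet.
 */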
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
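/*
 * DMA_TERMINATE_ALL: flush the channel thread in the PL330 core, mark
 * every queued descriptor DONE and run the tasklet so they are handed
 * back to the pool (client callbacks still run for aborted descs).
 */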
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
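/*
 * Descriptors are allocated in batches: NR_DEFAULT_DESC with
 * GFP_KERNEL at probe time, and one at a time with GFP_ATOMIC from
 * the prep path whenever the pool runs dry.
 */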
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);
		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = peri->peri_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
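/*
 * Pick the largest burst length that both fits the DMAC's data buffer
 * (data_bus_width/8 * data_buf_dep bytes, scaled down by the burst
 * size) and evenly divides the transfer length, clamped to the
 * architectural maximum of 16 beats.
 */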
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst, src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
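/*
 * Build one descriptor per scatterlist entry, chained off the first
 * descriptor's node list; the last descriptor is returned and its
 * tx_submit() moves the whole chain onto the channel work_list.
 */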
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr;
	int i, burst_size;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_MEM_TO_DEV &&
			peri->rqtype != MEMTODEV) ||
	    (direction == DMA_DEV_TO_MEM &&
			peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = peri->fifo_addr;
	burst_size = peri->burst_sz;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = burst_size;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
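/*
 * Typical dmaengine client flow against the hooks above (a sketch
 * only; the client-side sequence is generic dmaengine usage, not part
 * of this file): acquire a channel with dma_request_channel(), call
 * the prep_slave_sg/memcpy/cyclic hook to get a
 * dma_async_tx_descriptor, submit it via its tx_submit() (e.g. through
 * dmaengine_submit()), then kick the transfer with
 * dma_async_issue_pending(), which lands in pl330_issue_pending().
 */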
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
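/*
 * Probe: map the DMAC, register its irq with the PL330 core, seed the
 * descriptor pool, describe each channel to the dmaengine framework
 * and register the dma_device.
 */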
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);
	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);
	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}
	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	amba_set_drvdata(adev, pdmac);

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;
	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

	kfree(pdmac);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};
static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");