/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN	4 /* max channels across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHERAL_MASK_BASE	0xFFAE8008
#define LNW_PERIPHERAL_MASK_SIZE	0x10
#define LNW_PERIPHERAL_STATUS		0x0
#define LNW_PERIPHERAL_MASK		0x8
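/*
 * The LNW_PERIPHERAL_* offsets describe a small fixed MMIO window
 * (remapped in mid_setup_dma() when pimr_mask is non-zero) holding the
 * LPE peripheral interrupt status and mask registers used by the DMAC1
 * family of controllers.
 */
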
struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
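/*
 * Example: the DMAC1 entry in intel_mid_dma_ids[] below,
 * INFO(2, 6, 4095, 0x200020), describes a controller with two channels
 * based at channel 6, a maximum block size of 4095 data items and a
 * peripheral interrupt mask of 0x200020.
 */
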
/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index - convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;

	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
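/*
 * Example: with base = 4 and *status = 0x30 (two channels pending), the
 * first call clears bit 4 and returns index 0, leaving *status = 0x20 so
 * that the next call returns index 1.
 */
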
/**
 * get_block_ts - calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return data items or 0xFFFF if exceeds max length for block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
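/*
 * Example: a 4096 byte transfer at DMA_SLAVE_BUSWIDTH_2_BYTES gives
 * 4096 / 2 = 2048 data items; a result larger than block_size is clamped
 * to the 0xFFFF sentinel instead.
 */
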
/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_peripheral_intr - mask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_peripheral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHERAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHERAL_MASK);
	}
	return;
}
/**
 * dmac1_unmask_peripheral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_peripheral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHERAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHERAL_MASK);
	}
	return;
}
/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_peripheral_intr(midc);

	/* enable channel interrupts */
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}
/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* Check LPE PISR, make sure fwd is disabled */
	dmac1_mask_peripheral_intr(midc);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}
/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}
/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
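/*
 * Note: midc_desc_get()/midc_desc_put() implement a simple free-list
 * allocator over the per-channel descriptor pool; only descriptors
 * already acked by the client (async_tx_test_ack()) may be reused.
 */
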
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel must be idle */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/* error */
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/* write registers and enable */
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}
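/*
 * Note the ordering above: SAR/DAR/LLP and the CFG/CTL words are all
 * programmed while the channel is still disabled, and the write to
 * DMA_CHAN_EN comes last so the engine never starts half-configured.
 */
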
/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	dma_cookie_complete(txd);
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/* clear the DONE bit of completed LLI in memory */
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);
}
/**
 * midc_scan_descriptors - check the descriptors in channel
 *				mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * marked as complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/* tx is complete */
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg - Helper function to convert
 *			SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned long flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/* Populate CTL_LOW and LLI values */
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
		/* Check for circular list, otherwise terminate LLI to ZERO */
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/* Populate CTL_HI values */
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/* Populate SAR and DAR values */
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_TO_DEVICE) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_FROM_DEVICE) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/* Copy values into block descriptor in system memory */
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
		lli_bloc_desc++;
	}
	/* Copy very first LLI values to descriptor */
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
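/*
 * The loop above leaves desc->lli pointing at an array of
 * struct intel_mid_dma_lli, one entry per scatterlist element, each
 * holding {sar, dar, llp, ctl_lo, ctl_hi} with llp chaining to the
 * physical address of the next entry (or back to desc->lli_phys when
 * DMA_PREP_CIRCULAR_LIST is set).
 */
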
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}
/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}
/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan for where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);
		ret = dma_cookie_status(chan, cookie, txstate);
	}

	return ret;
}
static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
	enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/* Suspend and disable the channel */
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}
/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: source address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/* calculate CFG_LO */
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/* calculate CFG_HI */
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/* SW HS only */
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /* default value */
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /* default value */
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/* calculate CTL_HI */
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);

	/* calculate CTL_LO */
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		 1 Byte			0b000
	 *		 2 Bytes		0b001
	 *		 4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
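	/*
	 * The buswidth values are byte counts (1, 2 or 4), so the integer
	 * division by two above lands exactly on the controller encoding
	 * from the table: 1 -> 0b000, 2 -> 0b001, 4 -> 0b010.
	 */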
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (ignored)
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
					mids->dma_slave.dst_addr,
					mids->dma_slave.src_addr,
					sgl->length,
					flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors */
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}
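/*
 * Client usage sketch (illustrative only, not part of this driver):
 * a peripheral driver holding one of these channels would typically do
 *
 *	dmaengine_slave_config(chan, &config);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = xfer_done;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * which lands in the device_control, prep_slave_sg, tx_submit and
 * issue_pending callbacks implemented in this file.
 */
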
/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;

	if (true == midc->busy) {
		/* trying to free a channel that is still in use! */
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}
/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc;
	dma_addr_t phys;
	int i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/* ch is not idle */
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	dma_cookie_init(chan);

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}
/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptors for errors
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}
/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/* txn interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/* clear these interrupts first */
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block)
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block)
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/* err interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}
/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt, and if so schedule the tasklet;
 * otherwise ignore it
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/* DMA Interrupt */
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/* need to disable intr */
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}
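/*
 * Both controller families share one ISR body; the interrupt1/interrupt2
 * wrappers exist only so that DMAC1 and DMAC2 can be registered with
 * distinct handler names in mid_setup_dma() below.
 */
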
/**
 * mid_setup_dma - Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHERAL_MASK_BASE,
					LNW_PERIPHERAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			return -ENOMEM;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/* init CH structures */
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.chan_id = i;
		dma_cookie_init(&midch->chan);
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);

		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/* init dma structure */
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;
	dma->common.chancnt = dma->max_chan;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/* enable dma cntrl */
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/* register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/* register device w/ engine */
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
}
/**
 * middma_shutdown - Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}
/**
 * intel_mid_dma_probe - PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and channel initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}
/**
 * intel_mid_dma_remove - PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and channel cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @pci: PCI device structure
 * @state: PM message
 *
 * This function is called by OS when a power event occurs
 */
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
	int i;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_suspend called\n");
	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/*
 * dma_resume - PCI resume function
 *
 * @pci: PCI device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct pci_dev *pci)
{
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}
static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return pm_schedule_suspend(dev, 0);
}
/******************************************************************************
PCI stuff*/
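/*
 * The driver_data of each entry packs the controller parameters via
 * INFO(): channel count, channel base, max block size and peripheral
 * interrupt mask (non-zero only for the DMAC1 devices, which route
 * peripheral interrupts through the LPE mask window mapped above).
 */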
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	__devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.suspend = dma_suspend,
	.resume = dma_resume,
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);