/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

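	/* bit 31 of the status word marks the descriptor as owned by the
	 * XOR engine; bit 31 of the command word enables the
	 * end-of-descriptor interrupt
	 */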
	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

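/* each operation fits in a single slot: the prep routines bound every
 * transfer by MV_XOR_MAX_BYTE_COUNT, so no multi-slot chains are needed
 */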
static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

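/* the two init-value registers form a 64-bit fill pattern; the 32-bit
 * value is simply written to both halves
 */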
static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

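/* within each channel's 16-bit cause field, the low bits report normal
 * completion events while bits 4..9 flag error conditions
 */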
static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

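/* the cause register clears on writing zero, so writing the complement
 * of a single bit acknowledges just that event for this channel
 */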
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

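/* memset requests are programmed through the engine registers rather
 * than through descriptors, so they can never be appended to an
 * existing descriptor chain
 */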
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			       enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

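/* bits 5:4 of the activation register hold the channel state; a value
 * of 1 means the engine is actively executing a chain
 */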
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flag a descriptor slot for reuse
 * @mv_chan: channel that owns the slot
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* memset requests program the engine registers directly;
		 * no hardware descriptors are used
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		    int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start the search from the last allocated descriptor; if a
	 * contiguous allocation cannot be found, restart the search from
	 * the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

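	/* cookies are positive, monotonically increasing transaction ids;
	 * wrap back to 1 when the signed counter overflows
	 */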
	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
					struct mv_xor_desc_slot,
					slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
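	/* not complete yet: run cleanup to advance completed_cookie, then
	 * re-sample and report the channel state
	 */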
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config       0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation   0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause   0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask    0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause  0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr   0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

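	/* channel activation is comparatively expensive, so it is batched:
	 * the engine is only (re)started once enough descriptors have been
	 * queued
	 */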
	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
	  "( %s%s%s%s)\n",
	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 out:
	return ret;
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

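	/* start from a clean slate: disable all eight address decoding
	 * windows, then open one window per DRAM chip select below
	 */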
	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

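		/* bit i enables window i; the two bits at 16 + 2*i set its
		 * access protection to full read/write
		 */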
		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= __devexit_p(mv_xor_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");