drivers/net/wireless/bcm43xx/bcm43xx_dma.c
/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;

        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                 buf, len,
                                 direction);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                              &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Sigh... */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                     PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                assert(0);
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
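        /* Poll the RX status register until the engine reports the
         * "disabled" state; i is set to -1 below to flag success. */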
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr,
                            u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

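                /* The upper 32 address bits are split: the routing/translation
                 * bits are replaced by ring->routing, and the bits above them
                 * go into the address-extension field of control word 1. */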
                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                           >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        assert(0);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;

        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

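        /* Clear the length and cookie fields, so the RX path can later
         * poll them to detect when the device has filled in this buffer. */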
        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;
        return ring;

out:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to take care of concurrency with the rx handler.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -ENOSYS;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                : sizeof(struct bcm43xx_dmadesc32);
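        /* The TX index register takes a byte offset into the ring,
         * so convert the slot number to a descriptor byte offset. */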
        bcm43xx_dma_write(ring, offset,
                          (u32)(slot * descsize));
}

static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));
        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* The chip cannot handle DMA to/from > 1GB; use a bounce
                 * buffer (copied from the b44 driver). */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        assert(0);
                        return;
                }
                skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}

static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                        "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
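        /* The RX status register reports the hardware's current descriptor
         * as a byte offset; divide by the descriptor size to get the slot. */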
        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  | BCM43xx_DMA32_TXSUSPEND);
        }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}