/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* 32bit DMA ops. */
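/*
 * Each ring keeps two parallel per-slot arrays: the hardware descriptors
 * in ring->descbase (DMA-coherent memory that the device reads) and the
 * driver-only metadata in ring->meta (skb pointer and DMA address).
 * idx2desc() returns both for a given slot.
 */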
static
struct b43legacy_dmadesc_generic *op32_idx2desc(
                                        struct b43legacy_dmaring *ring,
                                        int slot,
                                        struct b43legacy_dmadesc_meta **meta)
{
        struct b43legacy_dmadesc32 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43legacy_dmadesc_generic *)desc;
}

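/*
 * Fill one 32-bit hardware descriptor. The low bits of the DMA address
 * go into the address word directly; the bits covered by
 * SSB_DMA_TRANSLATION_MASK are moved into the ADDREXT field of the
 * control word and replaced by the SSB bus translation bits.
 */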
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
                                 struct b43legacy_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43legacy_dmadesc32 *descbase = ring->descbase;
        int slot;
        u32 ctl;
        u32 addr;
        u32 addrext;

        slot = (int)(&(desc->dma32) - descbase);
        B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
                   >> SSB_DMA_TRANSLATION_SHIFT;
        addr |= ssb_dma_translation(ring->dev->dev);
        ctl = (bufsize - ring->frameoffset)
              & B43legacy_DMA32_DCTL_BYTECNT;
        if (slot == ring->nr_slots - 1)
                ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
        if (start)
                ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
        if (end)
                ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
        if (irq)
                ctl |= B43legacy_DMA32_DCTL_IRQ;
        ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
               & B43legacy_DMA32_DCTL_ADDREXT_MASK;

        desc->dma32.control = cpu_to_le32(ctl);
        desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
        b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
                            (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
                            b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
                            | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
                            b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
                            & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
        u32 val;

        val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
        val &= B43legacy_DMA32_RXDPTR;

        return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
                                    int slot)
{
        b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
                            (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

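/*
 * The 32-bit and 64-bit register layouts are driven through this common
 * vtable, so the rest of the driver stays agnostic of the descriptor
 * width and always goes through ring->ops.
 */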
static const struct b43legacy_dma_ops dma32_ops = {
        .idx2desc               = op32_idx2desc,
        .fill_descriptor        = op32_fill_descriptor,
        .poke_tx                = op32_poke_tx,
        .tx_suspend             = op32_tx_suspend,
        .tx_resume              = op32_tx_resume,
        .get_current_rxslot     = op32_get_current_rxslot,
        .set_current_rxslot     = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op64_idx2desc(
                                        struct b43legacy_dmaring *ring,
                                        int slot,
                                        struct b43legacy_dmadesc_meta
                                        **meta)
{
        struct b43legacy_dmadesc64 *desc;

        *meta = &(ring->meta[slot]);
        desc = ring->descbase;
        desc = &(desc[slot]);

        return (struct b43legacy_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
                                 struct b43legacy_dmadesc_generic *desc,
                                 dma_addr_t dmaaddr, u16 bufsize,
                                 int start, int end, int irq)
{
        struct b43legacy_dmadesc64 *descbase = ring->descbase;
        int slot;
        u32 ctl0 = 0;
        u32 ctl1 = 0;
        u32 addrlo;
        u32 addrhi;
        u32 addrext;

        slot = (int)(&(desc->dma64) - descbase);
        B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

        addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
        addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
        addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
                  >> SSB_DMA_TRANSLATION_SHIFT;
        addrhi |= ssb_dma_translation(ring->dev->dev);
        if (slot == ring->nr_slots - 1)
                ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
        if (start)
                ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
        if (end)
                ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
        if (irq)
                ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
        ctl1 |= (bufsize - ring->frameoffset)
                & B43legacy_DMA64_DCTL1_BYTECNT;
        ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
                & B43legacy_DMA64_DCTL1_ADDREXT_MASK;

        desc->dma64.control0 = cpu_to_le32(ctl0);
        desc->dma64.control1 = cpu_to_le32(ctl1);
        desc->dma64.address_low = cpu_to_le32(addrlo);
        desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
        b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
                            (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static void op64_tx_suspend(struct b43legacy_dmaring *ring)
{
        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
                            b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
                            | B43legacy_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43legacy_dmaring *ring)
{
        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
                            b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
                            & ~B43legacy_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
{
        u32 val;

        val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
        val &= B43legacy_DMA64_RXSTATDPTR;

        return (val / sizeof(struct b43legacy_dmadesc64));
}

static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
                                    int slot)
{
        b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
                            (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static const struct b43legacy_dma_ops dma64_ops = {
        .idx2desc               = op64_idx2desc,
        .fill_descriptor        = op64_fill_descriptor,
        .poke_tx                = op64_poke_tx,
        .tx_suspend             = op64_tx_suspend,
        .tx_resume              = op64_tx_resume,
        .get_current_rxslot     = op64_get_current_rxslot,
        .set_current_rxslot     = op64_set_current_rxslot,
};

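/*
 * Slot accounting helpers. The descriptor ring is circular:
 * next_slot()/prev_slot() wrap at nr_slots, and free_slots() is the
 * number of descriptors not currently handed to the hardware.
 */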
static inline int free_slots(struct b43legacy_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
        B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
        B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
                                  int current_used_slots)
{
        if (current_used_slots <= ring->max_used_slots)
                return;
        ring->max_used_slots = current_used_slots;
        if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
                b43legacydbg(ring->dev->wl,
                       "max_used_slots increased to %d on %s ring %d\n",
                       ring->max_used_slots,
                       ring->tx ? "TX" : "RX",
                       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
                           int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
        int slot;

        B43legacy_WARN_ON(!ring->tx);
        B43legacy_WARN_ON(ring->stopped);
        B43legacy_WARN_ON(free_slots(ring) == 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        update_max_used_slots(ring, ring->used_slots);

        return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
                                                struct b43legacy_wldev *dev,
                                                int queue_priority)
{
        struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
        return dev->dma.tx_ring1;

        /* 0 = highest priority */
        switch (queue_priority) {
        default:
                B43legacy_WARN_ON(1);
                /* fallthrough */
        case 0:
                ring = dev->dma.tx_ring3;
                break;
        case 1:
                ring = dev->dma.tx_ring2;
                break;
        case 2:
                ring = dev->dma.tx_ring1;
                break;
        case 3:
                ring = dev->dma.tx_ring0;
                break;
        case 4:
                ring = dev->dma.tx_ring4;
                break;
        case 5:
                ring = dev->dma.tx_ring5;
                break;
        }

        return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
        static const u8 idx_to_prio[] =
                { 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
        return 0;

        return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
                                        int controller_idx)
{
        static const u16 map64[] = {
                B43legacy_MMIO_DMA64_BASE0,
                B43legacy_MMIO_DMA64_BASE1,
                B43legacy_MMIO_DMA64_BASE2,
                B43legacy_MMIO_DMA64_BASE3,
                B43legacy_MMIO_DMA64_BASE4,
                B43legacy_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                B43legacy_MMIO_DMA32_BASE0,
                B43legacy_MMIO_DMA32_BASE1,
                B43legacy_MMIO_DMA32_BASE2,
                B43legacy_MMIO_DMA32_BASE3,
                B43legacy_MMIO_DMA32_BASE4,
                B43legacy_MMIO_DMA32_BASE5,
        };

        if (type == B43legacy_DMA_64BIT) {
                B43legacy_WARN_ON(!(controller_idx >= 0 &&
                                  controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
        }
        B43legacy_WARN_ON(!(controller_idx >= 0 &&
                          controller_idx < ARRAY_SIZE(map32)));
        return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;

        if (tx)
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len,
                                             DMA_TO_DEVICE);
        else
                dmaaddr = ssb_dma_map_single(ring->dev->dev,
                                             buf, len,
                                             DMA_FROM_DEVICE);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx)
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len,
                                     DMA_TO_DEVICE);
        else
                ssb_dma_unmap_single(ring->dev->dev,
                                     addr, len,
                                     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        B43legacy_WARN_ON(ring->tx);

        ssb_dma_sync_single_for_cpu(ring->dev->dev,
                                    addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        B43legacy_WARN_ON(ring->tx);

        ssb_dma_sync_single_for_device(ring->dev->dev,
                                       addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
                            struct b43legacy_dmadesc_meta *meta,
                            int irq_context)
{
        if (meta->skb) {
                if (irq_context)
                        dev_kfree_skb_irq(meta->skb);
                else
                        dev_kfree_skb(meta->skb);
                meta->skb = NULL;
        }
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
        /* GFP flags must match the flags in free_ringmemory()! */
        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
                                                  B43legacy_DMA_RINGMEMSIZE,
                                                  &(ring->dmabase),
                                                  GFP_KERNEL);
        if (!ring->descbase) {
                b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
                             " failed\n");
                return -ENOMEM;
        }
        memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
        ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
                                ring->descbase, ring->dmabase, GFP_KERNEL);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
                                            u16 mmio_base,
                                            enum b43legacy_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        offset = (type == B43legacy_DMA_64BIT) ?
                 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
        b43legacy_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43legacy_DMA_64BIT) ?
                         B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
                value = b43legacy_read32(dev, mmio_base + offset);
                if (type == B43legacy_DMA_64BIT) {
                        value &= B43legacy_DMA64_RXSTAT;
                        if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43legacy_DMA32_RXSTATE;
                        if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43legacyerr(dev->wl, "DMA RX reset timed out\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
                                            u16 mmio_base,
                                            enum b43legacy_dmatype type)
{
        int i;
        u32 value;
        u16 offset;

        might_sleep();

        for (i = 0; i < 10; i++) {
                offset = (type == B43legacy_DMA_64BIT) ?
                         B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
                value = b43legacy_read32(dev, mmio_base + offset);
                if (type == B43legacy_DMA_64BIT) {
                        value &= B43legacy_DMA64_TXSTAT;
                        if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
                            value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
                            value == B43legacy_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= B43legacy_DMA32_TXSTATE;
                        if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
                            value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
                            value == B43legacy_DMA32_TXSTAT_STOPPED)
                                break;
                }
                msleep(1);
        }
        offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
                                                 B43legacy_DMA32_TXCTL;
        b43legacy_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
                offset = (type == B43legacy_DMA_64BIT) ?
                         B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
                value = b43legacy_read32(dev, mmio_base + offset);
                if (type == B43legacy_DMA_64BIT) {
                        value &= B43legacy_DMA64_TXSTAT;
                        if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= B43legacy_DMA32_TXSTATE;
                        if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                msleep(1);
        }
        if (i != -1) {
                b43legacyerr(dev->wl, "DMA TX reset timed out\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        msleep(1);

        return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
                                         dma_addr_t addr,
                                         size_t buffersize,
                                         bool dma_to_device)
{
        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                return 1;

        switch (ring->type) {
        case B43legacy_DMA_30BIT:
                if ((u64)addr + buffersize > (1ULL << 30))
                        goto address_error;
                break;
        case B43legacy_DMA_32BIT:
                if ((u64)addr + buffersize > (1ULL << 32))
                        goto address_error;
                break;
        case B43legacy_DMA_64BIT:
                /* Currently we can't have addresses beyond 64 bits in the kernel. */
                break;
        }

        /* The address is OK. */
        return 0;

address_error:
        /* We can't support this address. Unmap it again. */
        unmap_descbuffer(ring, addr, buffersize, dma_to_device);

        return 1;
}

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
                               struct b43legacy_dmadesc_generic *desc,
                               struct b43legacy_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct b43legacy_rxhdr_fw3 *rxhdr;
        struct b43legacy_hwtxstatus *txstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        B43legacy_WARN_ON(ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data,
                                 ring->rx_buffersize, 0);
        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;

                dev_kfree_skb_any(skb);

                skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
                if (unlikely(!skb))
                        return -ENOMEM;
                dmaaddr = map_descbuffer(ring, skb->data,
                                         ring->rx_buffersize, 0);
        }

        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);

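        /* Zero the status fields that the RX path polls: a zero frame_len
         * (or, on the TX-status ring, a zero cookie) means the hardware
         * has not written this buffer yet. */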
        rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
        rxhdr->frame_len = 0;
        txstat = (struct b43legacy_hwtxstatus *)(skb->data);
        txstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
        int i;
        int err = -ENOMEM;
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err) {
                        b43legacyerr(ring->dev->wl,
                               "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
                }
        }
        mb(); /* all descbuffer setup before next line */
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;
        u32 trans = ssb_dma_translation(ring->dev->dev);

        if (ring->tx) {
                if (ring->type == B43legacy_DMA_64BIT) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                                  >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43legacy_DMA64_TXENABLE;
                        value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
                                & B43legacy_DMA64_TXADDREXT_MASK;
                        b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
                                            value);
                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
                                            (ringbase & 0xFFFFFFFF));
                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
                                            ((ringbase >> 32)
                                            & ~SSB_DMA_TRANSLATION_MASK)
                                            | trans);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                                  >> SSB_DMA_TRANSLATION_SHIFT;
                        value = B43legacy_DMA32_TXENABLE;
                        value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
                                & B43legacy_DMA32_TXADDREXT_MASK;
                        b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
                                            value);
                        b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
                                            (ringbase &
                                            ~SSB_DMA_TRANSLATION_MASK)
                                            | trans);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->type == B43legacy_DMA_64BIT) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
                                  >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset <<
                                 B43legacy_DMA64_RXFROFF_SHIFT);
                        value |= B43legacy_DMA64_RXENABLE;
                        value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
                                & B43legacy_DMA64_RXADDREXT_MASK;
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
                                            value);
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
                                            (ringbase & 0xFFFFFFFF));
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
                                            ((ringbase >> 32) &
                                            ~SSB_DMA_TRANSLATION_MASK) |
                                            trans);
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
                                            200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
                                  >> SSB_DMA_TRANSLATION_SHIFT;
                        value = (ring->frameoffset <<
                                 B43legacy_DMA32_RXFROFF_SHIFT);
                        value |= B43legacy_DMA32_RXENABLE;
                        value |= (addrext <<
                                 B43legacy_DMA32_RXADDREXT_SHIFT)
                                 & B43legacy_DMA32_RXADDREXT_MASK;
                        b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
                                            value);
                        b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
                                            (ringbase &
                                            ~SSB_DMA_TRANSLATION_MASK)
                                            | trans);
                        b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
                                            200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
        if (ring->tx) {
                b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
                                                 ring->type);
                if (ring->type == B43legacy_DMA_64BIT) {
                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
                        b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
                } else
                        b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
        } else {
                b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
                                                 ring->type);
                if (ring->type == B43legacy_DMA_64BIT) {
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
                        b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
                } else
                        b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->ops->idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        B43legacy_WARN_ON(!ring->tx);
                        continue;
                }
                if (ring->tx)
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                else
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                free_descriptor_buffer(ring, meta, 0);
        }
}

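/*
 * Probe the DMA address width the hardware supports. A set
 * SSB_TMSHIGH_DMA64 core flag means a 64-bit engine. Otherwise, write
 * the ADDREXT mask into the first controller's 32-bit TXCTL register
 * and read it back: if the bits stick, the engine decodes 32-bit
 * addresses (with address extension); if not, it is limited to 30 bits.
 */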
static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
        u32 tmp;
        u16 mmio_base;

        tmp = b43legacy_read32(dev, SSB_TMSHIGH);
        if (tmp & SSB_TMSHIGH_DMA64)
                return DMA_64BIT_MASK;
        mmio_base = b43legacy_dmacontroller_base(0, 0);
        b43legacy_write32(dev,
                        mmio_base + B43legacy_DMA32_TXCTL,
                        B43legacy_DMA32_TXADDREXT_MASK);
        tmp = b43legacy_read32(dev, mmio_base +
                               B43legacy_DMA32_TXCTL);
        if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
                return DMA_32BIT_MASK;

        return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
                                                  int controller_index,
                                                  int for_tx,
                                                  enum b43legacy_dmatype type)
{
        struct b43legacy_dmaring *ring;
        int err;
        int nr_slots;
        dma_addr_t dma_test;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;
        ring->type = type;
        ring->dev = dev;

        nr_slots = B43legacy_RXRING_SLOTS;
        if (for_tx)
                nr_slots = B43legacy_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;
        if (for_tx) {
                ring->txhdr_cache = kcalloc(nr_slots,
                                        sizeof(struct b43legacy_txhdr_fw3),
                                        GFP_KERNEL);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;

                /* test for ability to dma to txhdr_cache */
                dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
                                              sizeof(struct b43legacy_txhdr_fw3),
                                              DMA_TO_DEVICE);

                if (b43legacy_dma_mapping_error(ring, dma_test,
                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
                        /* ugh realloc */
                        kfree(ring->txhdr_cache);
                        ring->txhdr_cache = kcalloc(nr_slots,
                                        sizeof(struct b43legacy_txhdr_fw3),
                                        GFP_KERNEL | GFP_DMA);
                        if (!ring->txhdr_cache)
                                goto err_kfree_meta;

                        dma_test = ssb_dma_map_single(dev->dev,
                                        ring->txhdr_cache,
                                        sizeof(struct b43legacy_txhdr_fw3),
                                        DMA_TO_DEVICE);

                        if (b43legacy_dma_mapping_error(ring, dma_test,
                                        sizeof(struct b43legacy_txhdr_fw3), 1))
                                goto err_kfree_txhdr_cache;
                }

                ssb_dma_unmap_single(dev->dev, dma_test,
                                     sizeof(struct b43legacy_txhdr_fw3),
                                     DMA_TO_DEVICE);
        }

        ring->nr_slots = nr_slots;
        ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
        if (type == B43legacy_DMA_64BIT)
                ring->ops = &dma64_ops;
        else
                ring->ops = &dma32_ops;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
                } else
                        B43legacy_WARN_ON(1);
        }
        spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
        ring->last_injected_overflow = jiffies;
#endif

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_txhdr_cache;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

out:
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_txhdr_cache:
        kfree(ring->txhdr_cache);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
        if (!ring)
                return;

        b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
                     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
                     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
                     ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->txhdr_cache);
        kfree(ring->meta);
        kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
        struct b43legacy_dma *dma;

        if (b43legacy_using_pio(dev))
                return;
        dma = &dev->dma;

        b43legacy_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        b43legacy_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        b43legacy_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        b43legacy_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        b43legacy_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        b43legacy_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        b43legacy_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        b43legacy_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
        struct b43legacy_dma *dma = &dev->dma;
        struct b43legacy_dmaring *ring;
        int err;
        u64 dmamask;
        enum b43legacy_dmatype type;

        dmamask = supported_dma_mask(dev);
        switch (dmamask) {
        default:
                B43legacy_WARN_ON(1);
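                /* Fall through: treat an unexpected mask as 30-bit DMA. */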
        case DMA_30BIT_MASK:
                type = B43legacy_DMA_30BIT;
                break;
        case DMA_32BIT_MASK:
                type = B43legacy_DMA_32BIT;
                break;
        case DMA_64BIT_MASK:
                type = B43legacy_DMA_64BIT;
                break;
        }

        err = ssb_dma_set_mask(dev->dev, dmamask);
        if (err) {
#ifdef CONFIG_B43LEGACY_PIO
                b43legacywarn(dev->wl, "DMA for this device not supported. "
                        "Falling back to PIO\n");
                dev->__using_pio = 1;
                return -EAGAIN;
#else
                b43legacyerr(dev->wl, "DMA for this device not supported and "
                       "no PIO support compiled in\n");
                return -EOPNOTSUPP;
#endif
        }

        err = -ENOMEM;
        /* setup TX DMA channels. */
        ring = b43legacy_setup_dmaring(dev, 0, 1, type);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = b43legacy_setup_dmaring(dev, 1, 1, type);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = b43legacy_setup_dmaring(dev, 2, 1, type);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = b43legacy_setup_dmaring(dev, 3, 1, type);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = b43legacy_setup_dmaring(dev, 4, 1, type);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = b43legacy_setup_dmaring(dev, 5, 1, type);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = b43legacy_setup_dmaring(dev, 0, 0, type);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (dev->dev->id.revision < 5) {
                ring = b43legacy_setup_dmaring(dev, 3, 0, type);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
        err = 0;
out:
        return err;

err_destroy_rx0:
        b43legacy_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        b43legacy_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        b43legacy_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        b43legacy_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        b43legacy_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        b43legacy_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        b43legacy_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
        goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
        cookie |= (u16)slot;

        return cookie;
}
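/*
 * Example: a frame queued on TX ring 2, slot 5 gets the cookie 0xC005.
 * parse_cookie() below reverses this mapping when the TX status arrives.
 */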

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
                                      u16 cookie, int *slot)
{
        struct b43legacy_dma *dma = &dev->dma;
        struct b43legacy_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                B43legacy_WARN_ON(1);
        }
        *slot = (cookie & 0x0FFF);
        B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

        return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
                            struct sk_buff *skb)
{
        const struct b43legacy_dma_ops *ops = ring->ops;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u8 *header;
        int slot, old_top_slot, old_used_slots;
        int err;
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;
        struct b43legacy_dmadesc_meta *meta_hdr;
        struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
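/* Each packet consumes two descriptors: one for the txhdr_fw3 header
 * and one for the frame payload. */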
        B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

        old_top_slot = ring->current_slot;
        old_used_slots = ring->used_slots;

        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
        memset(meta_hdr, 0, sizeof(*meta_hdr));

        header = &(ring->txhdr_cache[slot * sizeof(
                               struct b43legacy_txhdr_fw3)]);
        err = b43legacy_generate_txhdr(ring->dev, header,
                                 skb->data, skb->len, info,
                                 generate_cookie(ring, slot));
        if (unlikely(err)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return err;
        }

        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           sizeof(struct b43legacy_txhdr_fw3), 1);
        if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return -EIO;
        }
        ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

        /* Get a slot for the payload. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta);
        memset(meta, 0, sizeof(*meta));

        meta->skb = skb;
        meta->is_last_fragment = 1;

        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }

                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
                if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -EIO;
                        goto out_free_bounce;
                }
        }

        ops->fill_descriptor(ring, desc, meta->dmaaddr,
                             skb->len, 0, 1, 1);

        wmb();  /* previous stuff MUST be done */
        /* Now transfer the whole frame. */
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;

out_free_bounce:
        dev_kfree_skb_any(skb);
out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                         sizeof(struct b43legacy_txhdr_fw3), 1);
        return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
        if (unlikely(b43legacy_debug(ring->dev,
                                     B43legacy_DBG_DMAOVERFLOW))) {
                /* Check if we should inject another ringbuffer overflow
                 * to test handling of this situation in the stack. */
                unsigned long next_overflow;

                next_overflow = ring->last_injected_overflow + HZ;
                if (time_after(jiffies, next_overflow)) {
                        ring->last_injected_overflow = jiffies;
                        b43legacydbg(ring->dev->wl,
                               "Injecting TX ring overflow on "
                               "DMA controller %d\n", ring->index);
                        return 1;
                }
        }
#endif /* CONFIG_B43LEGACY_DEBUG */
        return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
                     struct sk_buff *skb)
{
        struct b43legacy_dmaring *ring;
        int err = 0;
        unsigned long flags;

        ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
        spin_lock_irqsave(&ring->lock, flags);
        B43legacy_WARN_ON(!ring->tx);
        if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
                b43legacywarn(dev->wl, "DMA queue overflow\n");
                err = -ENOSPC;
                goto out_unlock;
        }
        /* Check if the queue was stopped in mac80211,
         * but we got called nevertheless.
         * That would be a mac80211 bug. */
        B43legacy_BUG_ON(ring->stopped);

        err = dma_tx_fragment(ring, skb);
        if (unlikely(err == -ENOKEY)) {
                /* Drop this packet, as we don't have the encryption key
                 * anymore and must not transmit it unencrypted. */
                dev_kfree_skb_any(skb);
                err = 0;
                goto out_unlock;
        }
        if (unlikely(err)) {
                b43legacyerr(dev->wl, "DMA tx mapping failure\n");
                goto out_unlock;
        }
        ring->nr_tx_packets++;
        if ((free_slots(ring) < SLOTS_PER_PACKET) ||
            should_inject_overflow(ring)) {
                /* This TX ring is full. */
                ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
                ring->stopped = 1;
                if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                        b43legacydbg(dev->wl, "Stopped TX ring %d\n",
                               ring->index);
        }
out_unlock:
        spin_unlock_irqrestore(&ring->lock, flags);

        return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
                                 const struct b43legacy_txstatus *status)
{
        const struct b43legacy_dma_ops *ops;
        struct b43legacy_dmaring *ring;
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;
        int slot;

        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
                return;
        B43legacy_WARN_ON(!irqs_disabled());
        spin_lock(&ring->lock);

        B43legacy_WARN_ON(!ring->tx);
        ops = ring->ops;
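        /* Walk the descriptor chain for this frame, starting at the slot
         * from the cookie (the header descriptor) and stopping after the
         * descriptor marked as the last fragment (the payload). */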
1388         while (1) {
1389                 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1390                 desc = ops->idx2desc(ring, slot, &meta);
1391
1392                 if (meta->skb)
1393                         unmap_descbuffer(ring, meta->dmaaddr,
1394                                          meta->skb->len, 1);
1395                 else
1396                         unmap_descbuffer(ring, meta->dmaaddr,
1397                                          sizeof(struct b43legacy_txhdr_fw3),
1398                                          1);
1399
1400                 if (meta->is_last_fragment) {
1401                         struct ieee80211_tx_info *info;
1402                         BUG_ON(!meta->skb);
1403                         info = IEEE80211_SKB_CB(meta->skb);
1404                         /* Call back to inform the ieee80211 subsystem about the
1405                          * status of the transmission.
1406                          * Some fields of txstat are already filled in dma_tx().
1407                          */
1408
1409                         memset(&info->status, 0, sizeof(info->status));
1410
1411                         if (status->acked) {
1412                                 info->flags |= IEEE80211_TX_STAT_ACK;
1413                         } else {
1414                                 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
1415                                          info->status.excessive_retries = 1;
1416                         }
1417                         if (status->frame_count == 0) {
1418                                 /* The frame was not transmitted at all. */
1419                                 info->status.retry_count = 0;
1420                         } else
1421                                 info->status.retry_count = status->frame_count
1422                                                            - 1;
1423                         ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1424                         /* skb is freed by ieee80211_tx_status_irqsafe() */
1425                         meta->skb = NULL;
1426                 } else {
1427                         /* No need to call free_descriptor_buffer here, as
1428                          * this is only the txhdr, which is not allocated.
1429                          */
1430                         B43legacy_WARN_ON(meta->skb != NULL);
1431                 }
1432
1433                 /* Everything unmapped and free'd. So it's not used anymore. */
1434                 ring->used_slots--;
1435
1436                 if (meta->is_last_fragment)
1437                         break;
1438                 slot = next_slot(ring, slot);
1439         }
1440         dev->stats.last_tx = jiffies;
1441         if (ring->stopped) {
1442                 B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1443                 ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
1444                 ring->stopped = 0;
1445                 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1446                         b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1447                                ring->index);
1448         }
1449
1450         spin_unlock(&ring->lock);
1451 }
1452
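/* Report the fill level, limit and packet count of each DMA TX ring
 * to mac80211. */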
void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
                                struct ieee80211_tx_queue_stats *stats)
{
        const int nr_queues = dev->wl->hw->queues;
        struct b43legacy_dmaring *ring;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_queues; i++) {
                ring = priority_to_txring(dev, i);

                spin_lock_irqsave(&ring->lock, flags);
                stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
                stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
                stats[i].count = ring->nr_tx_packets;
                spin_unlock_irqrestore(&ring->lock, flags);
        }
}

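/* Process the frame received in the given RX slot: parse the RX header,
 * attach a fresh buffer to the descriptor and pass the filled skb up to
 * the RX path. On errors the old buffer is recycled and the frame is
 * dropped.
 */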
static void dma_rx(struct b43legacy_dmaring *ring,
                   int *slot)
{
        const struct b43legacy_dma_ops *ops = ring->ops;
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;
        struct b43legacy_rxhdr_fw3 *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ops->idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct b43legacy_hwtxstatus *hw =
                                (struct b43legacy_hwtxstatus *)skb->data;
                int i = 0;

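                /* The hardware may not have written the status cookie
                 * into the buffer yet. Poll for a short while before
                 * giving up on it. */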
                while (hw->cookie == 0) {
                        if (i > 100)
                                break;
                        i++;
                        udelay(2);
                        barrier();
                }
                b43legacy_handle_hwtxstatus(ring->dev, hw);
                /* Recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr,
                                           ring->rx_buffersize);

                return;
        }
        rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
        len = le16_to_cpu(rxhdr->frame_len);
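        /* A frame length of zero most likely means the hardware has not
         * finished DMA'ing the frame data yet, so retry a few times
         * below before dropping it. */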
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_len);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* Recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ops->idx2desc(ring, *slot, &meta);
                        /* Recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
                             "(len: %u, buffer: %u, nr-dropped: %d)\n",
                             len, ring->rx_buffersize, cnt);
                goto drop;
        }

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
                             " failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

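        /* A fresh buffer has been attached to the descriptor above, so
         * the old one now belongs to us. Unmap it, set the skb length to
         * the received frame and strip the frame offset header in front
         * of the actual frame data. */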
        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        b43legacy_rx(ring->dev, skb, rxhdr);
drop:
        return;
}

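/* Pull all frames that the hardware has received so far out of the RX
 * ring and hand them to dma_rx(). */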
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
        const struct b43legacy_dma_ops *ops = ring->ops;
        int slot;
        int current_slot;
        int used_slots = 0;

        B43legacy_WARN_ON(ring->tx);
        current_slot = ops->get_current_rxslot(ring);
        B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
                            ring->nr_slots));

        slot = ring->current_slot;
        for (; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
                update_max_used_slots(ring, ++used_slots);
        }
        ops->set_current_rxslot(ring, slot);
        ring->current_slot = slot;
}

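/* Suspend frame transmission on a single TX ring. */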
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43legacy_WARN_ON(!ring->tx);
        ring->ops->tx_suspend(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

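/* Resume frame transmission on a single TX ring. */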
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        B43legacy_WARN_ON(!ring->tx);
        ring->ops->tx_resume(ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

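/* Suspend TX on all DMA rings. The power saving control bits are set
 * first, so the device stays awake while transmission is suspended. */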
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
        b43legacy_power_saving_ctl_bits(dev, -1, 1);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
        b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

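/* Resume TX on all DMA rings, in reverse order of suspension, and
 * restore the previous power saving behavior. */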
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
        b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
        b43legacy_power_saving_ctl_bits(dev, -1, -1);
}