b44: replace the ssb_dma API with the generic DMA API
drivers/net/b44.c
1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7  * Copyright (C) 2006 Broadcom Corporation.
8  * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
9  *
10  * Distribute under GPL.
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/ssb/ssb.h>
30 #include <linux/slab.h>
31
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35
36
37 #include "b44.h"
38
39 #define DRV_MODULE_NAME         "b44"
40 #define DRV_MODULE_VERSION      "2.0"
41
42 #define B44_DEF_MSG_ENABLE        \
43         (NETIF_MSG_DRV          | \
44          NETIF_MSG_PROBE        | \
45          NETIF_MSG_LINK         | \
46          NETIF_MSG_TIMER        | \
47          NETIF_MSG_IFDOWN       | \
48          NETIF_MSG_IFUP         | \
49          NETIF_MSG_RX_ERR       | \
50          NETIF_MSG_TX_ERR)
51
52 /* length of time before we decide the hardware is borked,
53  * and dev->tx_timeout() should be called to fix the problem
54  */
55 #define B44_TX_TIMEOUT                  (5 * HZ)
56
57 /* hardware minimum and maximum for a single frame's data payload */
58 #define B44_MIN_MTU                     60
59 #define B44_MAX_MTU                     1500
60
61 #define B44_RX_RING_SIZE                512
62 #define B44_DEF_RX_RING_PENDING         200
63 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
64                                  B44_RX_RING_SIZE)
65 #define B44_TX_RING_SIZE                512
66 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
67 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
68                                  B44_TX_RING_SIZE)
69
70 #define TX_RING_GAP(BP) \
71         (B44_TX_RING_SIZE - (BP)->tx_pending)
72 #define TX_BUFFS_AVAIL(BP)                                              \
73         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
74           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
75           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
76 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
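
/* Editor's worked example (not part of the original source): with
 * B44_TX_RING_SIZE = 512 and the default tx_pending = 511, TX_RING_GAP()
 * is 1.  If tx_cons = 10 and tx_prod = 200 (cons <= prod), then
 * TX_BUFFS_AVAIL() = 10 + 511 - 200 = 321 free descriptors.  If the
 * producer has wrapped, e.g. tx_cons = 500 and tx_prod = 20, then
 * TX_BUFFS_AVAIL() = 500 - 20 - 1 = 479.  NEXT_TX() relies on the ring
 * size being a power of two, so the increment wraps with a simple mask.
 */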
77
78 #define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
79 #define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)
80
81 /* minimum number of free TX descriptors required to wake up TX process */
82 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
83
84 /* b44 internal pattern match filter info */
85 #define B44_PATTERN_BASE        0x400
86 #define B44_PATTERN_SIZE        0x80
87 #define B44_PMASK_BASE          0x600
88 #define B44_PMASK_SIZE          0x10
89 #define B44_MAX_PATTERNS        16
90 #define B44_ETHIPV6UDP_HLEN     62
91 #define B44_ETHIPV4UDP_HLEN     42
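
/* Editor's note, assuming standard header sizes (not spelled out in the
 * original source): the two lengths above decompose as
 *   B44_ETHIPV4UDP_HLEN = 14 (Ethernet) + 20 (IPv4) + 8 (UDP) = 42
 *   B44_ETHIPV6UDP_HLEN = 14 (Ethernet) + 40 (IPv6) + 8 (UDP) = 62
 * i.e. the offsets at which the magic-packet payload begins inside a
 * UDP-encapsulated Wake-on-LAN frame.
 */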
92
93 static char version[] __devinitdata =
94         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
95
96 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
97 MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
98 MODULE_LICENSE("GPL");
99 MODULE_VERSION(DRV_MODULE_VERSION);
100
101 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
102 module_param(b44_debug, int, 0);
103 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
104
105
106 #ifdef CONFIG_B44_PCI
107 static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
108         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
109         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
110         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
111         { 0 } /* terminate list with empty entry */
112 };
113 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
114
115 static struct pci_driver b44_pci_driver = {
116         .name           = DRV_MODULE_NAME,
117         .id_table       = b44_pci_tbl,
118 };
119 #endif /* CONFIG_B44_PCI */
120
121 static const struct ssb_device_id b44_ssb_tbl[] = {
122         SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
123         SSB_DEVTABLE_END
124 };
125 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
126
127 static void b44_halt(struct b44 *);
128 static void b44_init_rings(struct b44 *);
129
130 #define B44_FULL_RESET          1
131 #define B44_FULL_RESET_SKIP_PHY 2
132 #define B44_PARTIAL_RESET       3
133 #define B44_CHIP_RESET_FULL     4
134 #define B44_CHIP_RESET_PARTIAL  5
135
136 static void b44_init_hw(struct b44 *, int);
137
138 static int dma_desc_align_mask;
139 static int dma_desc_sync_size;
140 static int instance;
141
142 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
143 #define _B44(x...)      # x,
144 B44_STAT_REG_DECLARE
145 #undef _B44
146 };
147
148 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
149                                                 dma_addr_t dma_base,
150                                                 unsigned long offset,
151                                                 enum dma_data_direction dir)
152 {
153         dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
154                                    dma_desc_sync_size, dir);
155 }
156
157 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
158                                              dma_addr_t dma_base,
159                                              unsigned long offset,
160                                              enum dma_data_direction dir)
161 {
162         dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
163                                 dma_desc_sync_size, dir);
164 }
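
/* Editor's note (an assumption drawn from the B44_FLAG_*_RING_HACK paths
 * later in this file): the two helpers above matter only when a
 * descriptor ring could not come from dma_alloc_coherent() and was
 * instead kmalloc'ed and mapped with dma_map_single().  Such streaming
 * mappings are not cache-coherent, so each descriptor update must be
 * bracketed by an explicit sync, as in b44_alloc_rx_skb():
 *
 *	dp->ctrl = cpu_to_le32(ctrl);
 *	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 *	if (bp->flags & B44_FLAG_RX_RING_HACK)
 *		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
 *					     dest_idx * sizeof(*dp),
 *					     DMA_BIDIRECTIONAL);
 */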
165
166 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
167 {
168         return ssb_read32(bp->sdev, reg);
169 }
170
171 static inline void bw32(const struct b44 *bp,
172                         unsigned long reg, unsigned long val)
173 {
174         ssb_write32(bp->sdev, reg, val);
175 }
176
177 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
178                         u32 bit, unsigned long timeout, const int clear)
179 {
180         unsigned long i;
181
182         for (i = 0; i < timeout; i++) {
183                 u32 val = br32(bp, reg);
184
185                 if (clear && !(val & bit))
186                         break;
187                 if (!clear && (val & bit))
188                         break;
189                 udelay(10);
190         }
191         if (i == timeout) {
192                 if (net_ratelimit())
193                         netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
194                                    bit, reg, clear ? "clear" : "set");
195
196                 return -ENODEV;
197         }
198         return 0;
199 }
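
/* Editor's note, derived from the udelay(10) in the loop above: the
 * "timeout" argument is a poll count rather than a time unit; each
 * iteration busy-waits 10us, so a call such as
 *
 *	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 *
 * waits up to roughly 1ms for the BUSY bit to clear before reporting
 * -ENODEV.
 */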
200
201 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
202 {
203         u32 val;
204
205         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
206                             (index << CAM_CTRL_INDEX_SHIFT)));
207
208         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
209
210         val = br32(bp, B44_CAM_DATA_LO);
211
212         data[2] = (val >> 24) & 0xFF;
213         data[3] = (val >> 16) & 0xFF;
214         data[4] = (val >> 8) & 0xFF;
215         data[5] = (val >> 0) & 0xFF;
216
217         val = br32(bp, B44_CAM_DATA_HI);
218
219         data[0] = (val >> 8) & 0xFF;
220         data[1] = (val >> 0) & 0xFF;
221 }
222
223 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
224 {
225         u32 val;
226
227         val  = ((u32) data[2]) << 24;
228         val |= ((u32) data[3]) << 16;
229         val |= ((u32) data[4]) <<  8;
230         val |= ((u32) data[5]) <<  0;
231         bw32(bp, B44_CAM_DATA_LO, val);
232         val = (CAM_DATA_HI_VALID |
233                (((u32) data[0]) << 8) |
234                (((u32) data[1]) << 0));
235         bw32(bp, B44_CAM_DATA_HI, val);
236         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
237                             (index << CAM_CTRL_INDEX_SHIFT)));
238         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
239 }
240
241 static inline void __b44_disable_ints(struct b44 *bp)
242 {
243         bw32(bp, B44_IMASK, 0);
244 }
245
246 static void b44_disable_ints(struct b44 *bp)
247 {
248         __b44_disable_ints(bp);
249
250         /* Flush posted writes. */
251         br32(bp, B44_IMASK);
252 }
253
254 static void b44_enable_ints(struct b44 *bp)
255 {
256         bw32(bp, B44_IMASK, bp->imask);
257 }
258
259 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
260 {
261         int err;
262
263         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
264         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
265                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
266                              (phy_addr << MDIO_DATA_PMD_SHIFT) |
267                              (reg << MDIO_DATA_RA_SHIFT) |
268                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
269         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
270         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
271
272         return err;
273 }
274
275 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
276 {
277         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
278         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
279                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
280                              (phy_addr << MDIO_DATA_PMD_SHIFT) |
281                              (reg << MDIO_DATA_RA_SHIFT) |
282                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
283                              (val & MDIO_DATA_DATA)));
284         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
285 }
286
287 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
288 {
289         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
290                 return 0;
291
292         return __b44_readphy(bp, bp->phy_addr, reg, val);
293 }
294
295 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
296 {
297         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
298                 return 0;
299
300         return __b44_writephy(bp, bp->phy_addr, reg, val);
301 }
302
303 /* miilib interface */
304 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
305 {
306         u32 val;
307         struct b44 *bp = netdev_priv(dev);
308         int rc = __b44_readphy(bp, phy_id, location, &val);
309         if (rc)
310                 return 0xffffffff;
311         return val;
312 }
313
314 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
315                          int val)
316 {
317         struct b44 *bp = netdev_priv(dev);
318         __b44_writephy(bp, phy_id, location, val);
319 }
320
321 static int b44_phy_reset(struct b44 *bp)
322 {
323         u32 val;
324         int err;
325
326         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
327                 return 0;
328         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
329         if (err)
330                 return err;
331         udelay(100);
332         err = b44_readphy(bp, MII_BMCR, &val);
333         if (!err) {
334                 if (val & BMCR_RESET) {
335                         netdev_err(bp->dev, "PHY Reset would not complete\n");
336                         err = -ENODEV;
337                 }
338         }
339
340         return err;
341 }
342
343 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
344 {
345         u32 val;
346
347         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
348         bp->flags |= pause_flags;
349
350         val = br32(bp, B44_RXCONFIG);
351         if (pause_flags & B44_FLAG_RX_PAUSE)
352                 val |= RXCONFIG_FLOW;
353         else
354                 val &= ~RXCONFIG_FLOW;
355         bw32(bp, B44_RXCONFIG, val);
356
357         val = br32(bp, B44_MAC_FLOW);
358         if (pause_flags & B44_FLAG_TX_PAUSE)
359                 val |= (MAC_FLOW_PAUSE_ENAB |
360                         (0xc0 & MAC_FLOW_RX_HI_WATER));
361         else
362                 val &= ~MAC_FLOW_PAUSE_ENAB;
363         bw32(bp, B44_MAC_FLOW, val);
364 }
365
366 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
367 {
368         u32 pause_enab = 0;
369
370         /* The driver supports only rx pause by default because
371            the b44 mac tx pause mechanism generates excessive
372            pause frames.
373            Use ethtool to turn on b44 tx pause if necessary.
374          */
375         if ((local & ADVERTISE_PAUSE_CAP) &&
376             (local & ADVERTISE_PAUSE_ASYM)) {
377                 if ((remote & LPA_PAUSE_ASYM) &&
378                     !(remote & LPA_PAUSE_CAP))
379                         pause_enab |= B44_FLAG_RX_PAUSE;
380         }
381
382         __b44_set_flow_ctrl(bp, pause_enab);
383 }
384
385 #ifdef SSB_DRIVER_MIPS
386 extern char *nvram_get(char *name);
387 static void b44_wap54g10_workaround(struct b44 *bp)
388 {
389         const char *str;
390         u32 val;
391         int err;
392
393         /*
394          * workaround for bad hardware design in Linksys WAP54G v1.0
395          * see https://dev.openwrt.org/ticket/146
396          * check and reset bit "isolate"
397          */
398         str = nvram_get("boardnum");
399         if (!str)
400                 return;
401         if (simple_strtoul(str, NULL, 0) == 2) {
402                 err = __b44_readphy(bp, 0, MII_BMCR, &val);
403                 if (err)
404                         goto error;
405                 if (!(val & BMCR_ISOLATE))
406                         return;
407                 val &= ~BMCR_ISOLATE;
408                 err = __b44_writephy(bp, 0, MII_BMCR, val);
409                 if (err)
410                         goto error;
411         }
412         return;
413 error:
414         pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
415 }
416 #else
417 static inline void b44_wap54g10_workaround(struct b44 *bp)
418 {
419 }
420 #endif
421
422 static int b44_setup_phy(struct b44 *bp)
423 {
424         u32 val;
425         int err;
426
427         b44_wap54g10_workaround(bp);
428
429         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
430                 return 0;
431         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
432                 goto out;
433         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
434                                 val & MII_ALEDCTRL_ALLMSK)) != 0)
435                 goto out;
436         if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
437                 goto out;
438         if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
439                                 val | MII_TLEDCTRL_ENABLE)) != 0)
440                 goto out;
441
442         if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
443                 u32 adv = ADVERTISE_CSMA;
444
445                 if (bp->flags & B44_FLAG_ADV_10HALF)
446                         adv |= ADVERTISE_10HALF;
447                 if (bp->flags & B44_FLAG_ADV_10FULL)
448                         adv |= ADVERTISE_10FULL;
449                 if (bp->flags & B44_FLAG_ADV_100HALF)
450                         adv |= ADVERTISE_100HALF;
451                 if (bp->flags & B44_FLAG_ADV_100FULL)
452                         adv |= ADVERTISE_100FULL;
453
454                 if (bp->flags & B44_FLAG_PAUSE_AUTO)
455                         adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
456
457                 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
458                         goto out;
459                 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
460                                                        BMCR_ANRESTART))) != 0)
461                         goto out;
462         } else {
463                 u32 bmcr;
464
465                 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
466                         goto out;
467                 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
468                 if (bp->flags & B44_FLAG_100_BASE_T)
469                         bmcr |= BMCR_SPEED100;
470                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
471                         bmcr |= BMCR_FULLDPLX;
472                 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
473                         goto out;
474
475                 /* Since we will not be negotiating there is no safe way
476                  * to determine if the link partner supports flow control
477                  * or not.  So just disable it completely in this case.
478                  */
479                 b44_set_flow_ctrl(bp, 0, 0);
480         }
481
482 out:
483         return err;
484 }
485
486 static void b44_stats_update(struct b44 *bp)
487 {
488         unsigned long reg;
489         u32 *val;
490
491         val = &bp->hw_stats.tx_good_octets;
492         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
493                 *val++ += br32(bp, reg);
494         }
495
496         /* Skip the 8 pad registers between the TX and RX counters. */
497         reg += 8*4UL;
498
499         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
500                 *val++ += br32(bp, reg);
501         }
502 }
503
504 static void b44_link_report(struct b44 *bp)
505 {
506         if (!netif_carrier_ok(bp->dev)) {
507                 netdev_info(bp->dev, "Link is down\n");
508         } else {
509                 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
510                             (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
511                             (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
512
513                 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
514                             (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
515                             (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
516         }
517 }
518
519 static void b44_check_phy(struct b44 *bp)
520 {
521         u32 bmsr, aux;
522
523         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
524                 bp->flags |= B44_FLAG_100_BASE_T;
525                 bp->flags |= B44_FLAG_FULL_DUPLEX;
526                 if (!netif_carrier_ok(bp->dev)) {
527                         u32 val = br32(bp, B44_TX_CTRL);
528                         val |= TX_CTRL_DUPLEX;
529                         bw32(bp, B44_TX_CTRL, val);
530                         netif_carrier_on(bp->dev);
531                         b44_link_report(bp);
532                 }
533                 return;
534         }
535
536         if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
537             !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
538             (bmsr != 0xffff)) {
539                 if (aux & MII_AUXCTRL_SPEED)
540                         bp->flags |= B44_FLAG_100_BASE_T;
541                 else
542                         bp->flags &= ~B44_FLAG_100_BASE_T;
543                 if (aux & MII_AUXCTRL_DUPLEX)
544                         bp->flags |= B44_FLAG_FULL_DUPLEX;
545                 else
546                         bp->flags &= ~B44_FLAG_FULL_DUPLEX;
547
548                 if (!netif_carrier_ok(bp->dev) &&
549                     (bmsr & BMSR_LSTATUS)) {
550                         u32 val = br32(bp, B44_TX_CTRL);
551                         u32 local_adv, remote_adv;
552
553                         if (bp->flags & B44_FLAG_FULL_DUPLEX)
554                                 val |= TX_CTRL_DUPLEX;
555                         else
556                                 val &= ~TX_CTRL_DUPLEX;
557                         bw32(bp, B44_TX_CTRL, val);
558
559                         if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
560                             !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
561                             !b44_readphy(bp, MII_LPA, &remote_adv))
562                                 b44_set_flow_ctrl(bp, local_adv, remote_adv);
563
564                         /* Link now up */
565                         netif_carrier_on(bp->dev);
566                         b44_link_report(bp);
567                 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
568                         /* Link now down */
569                         netif_carrier_off(bp->dev);
570                         b44_link_report(bp);
571                 }
572
573                 if (bmsr & BMSR_RFAULT)
574                         netdev_warn(bp->dev, "Remote fault detected in PHY\n");
575                 if (bmsr & BMSR_JCD)
576                         netdev_warn(bp->dev, "Jabber detected in PHY\n");
577         }
578 }
579
580 static void b44_timer(unsigned long __opaque)
581 {
582         struct b44 *bp = (struct b44 *) __opaque;
583
584         spin_lock_irq(&bp->lock);
585
586         b44_check_phy(bp);
587
588         b44_stats_update(bp);
589
590         spin_unlock_irq(&bp->lock);
591
592         mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
593 }
594
595 static void b44_tx(struct b44 *bp)
596 {
597         u32 cur, cons;
598
599         cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
600         cur /= sizeof(struct dma_desc);
601
602         /* XXX needs updating when NETIF_F_SG is supported */
603         for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
604                 struct ring_info *rp = &bp->tx_buffers[cons];
605                 struct sk_buff *skb = rp->skb;
606
607                 BUG_ON(skb == NULL);
608
609                 dma_unmap_single(bp->sdev->dma_dev,
610                                  rp->mapping,
611                                  skb->len,
612                                  DMA_TO_DEVICE);
613                 rp->skb = NULL;
614                 dev_kfree_skb_irq(skb);
615         }
616
617         bp->tx_cons = cons;
618         if (netif_queue_stopped(bp->dev) &&
619             TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
620                 netif_wake_queue(bp->dev);
621
622         bw32(bp, B44_GPTIMER, 0);
623 }
624
625 /* Works like this.  This chip writes a "struct rx_header" 30 bytes
626  * before the DMA address you give it.  So we allocate 30 more bytes
627  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
628  * point the chip at 30 bytes past where the rx_header will go.
629  */
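
/* Editor's sketch of the resulting buffer layout, assuming
 * RX_HEADER_LEN = 28 from b44.h (so RX_PKT_OFFSET = 30, matching the
 * "30 bytes" above):
 *
 *	skb->data -> +---------------------+
 *	             | struct rx_header    |  28 bytes, written by chip
 *	             | 2 bytes of padding  |
 *	             +---------------------+  <- packet data starts here
 *	             | frame, up to 1536 B |     (offset RX_PKT_OFFSET)
 *	             +---------------------+
 *
 * b44_rx() later does skb_put(len + RX_PKT_OFFSET) followed by
 * skb_pull(RX_PKT_OFFSET) to strip the header before handing the skb up.
 */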
630 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
631 {
632         struct dma_desc *dp;
633         struct ring_info *src_map, *map;
634         struct rx_header *rh;
635         struct sk_buff *skb;
636         dma_addr_t mapping;
637         int dest_idx;
638         u32 ctrl;
639
640         src_map = NULL;
641         if (src_idx >= 0)
642                 src_map = &bp->rx_buffers[src_idx];
643         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
644         map = &bp->rx_buffers[dest_idx];
645         skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
646         if (skb == NULL)
647                 return -ENOMEM;
648
649         mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
650                                  RX_PKT_BUF_SZ,
651                                  DMA_FROM_DEVICE);
652
653         /* Hardware bug work-around, the chip is unable to do PCI DMA
654            to/from anything above 1GB :-( */
655         if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
656                 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
657                 /* Sigh... */
658                 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
659                         dma_unmap_single(bp->sdev->dma_dev, mapping,
660                                              RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
661                 dev_kfree_skb_any(skb);
662                 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
663                 if (skb == NULL)
664                         return -ENOMEM;
665                 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
666                                          RX_PKT_BUF_SZ,
667                                          DMA_FROM_DEVICE);
668                 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
669                     mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
670                         if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
671                                 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
672                         dev_kfree_skb_any(skb);
673                         return -ENOMEM;
674                 }
675                 bp->force_copybreak = 1;
676         }
677
678         rh = (struct rx_header *) skb->data;
679
680         rh->len = 0;
681         rh->flags = 0;
682
683         map->skb = skb;
684         map->mapping = mapping;
685
686         if (src_map != NULL)
687                 src_map->skb = NULL;
688
689         ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
690         if (dest_idx == (B44_RX_RING_SIZE - 1))
691                 ctrl |= DESC_CTRL_EOT;
692
693         dp = &bp->rx_ring[dest_idx];
694         dp->ctrl = cpu_to_le32(ctrl);
695         dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
696
697         if (bp->flags & B44_FLAG_RX_RING_HACK)
698                 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
699                                             dest_idx * sizeof(*dp),
700                                             DMA_BIDIRECTIONAL);
701
702         return RX_PKT_BUF_SZ;
703 }
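
/* Editor's note on the 1GB checks above: DMA_BIT_MASK(30) is
 * ((1ULL << 30) - 1) = 0x3fffffff, so "mapping + RX_PKT_BUF_SZ >
 * DMA_BIT_MASK(30)" rejects any mapping whose buffer would extend past
 * the first gigabyte of bus address space.  The recovery pattern is:
 * unmap (if the mapping itself succeeded), free the skb, retry with a
 * GFP_DMA allocation from ZONE_DMA, and give up with -ENOMEM if even
 * that lands too high.
 */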
704
705 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
706 {
707         struct dma_desc *src_desc, *dest_desc;
708         struct ring_info *src_map, *dest_map;
709         struct rx_header *rh;
710         int dest_idx;
711         __le32 ctrl;
712
713         dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
714         dest_desc = &bp->rx_ring[dest_idx];
715         dest_map = &bp->rx_buffers[dest_idx];
716         src_desc = &bp->rx_ring[src_idx];
717         src_map = &bp->rx_buffers[src_idx];
718
719         dest_map->skb = src_map->skb;
720         rh = (struct rx_header *) src_map->skb->data;
721         rh->len = 0;
722         rh->flags = 0;
723         dest_map->mapping = src_map->mapping;
724
725         if (bp->flags & B44_FLAG_RX_RING_HACK)
726                 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
727                                          src_idx * sizeof(*src_desc),
728                                          DMA_BIDIRECTIONAL);
729
730         ctrl = src_desc->ctrl;
731         if (dest_idx == (B44_RX_RING_SIZE - 1))
732                 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
733         else
734                 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
735
736         dest_desc->ctrl = ctrl;
737         dest_desc->addr = src_desc->addr;
738
739         src_map->skb = NULL;
740
741         if (bp->flags & B44_FLAG_RX_RING_HACK)
742                 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
743                                              dest_idx * sizeof(*dest_desc),
744                                              DMA_BIDIRECTIONAL);
745
746         dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
747                                    RX_PKT_BUF_SZ,
748                                    DMA_FROM_DEVICE);
749 }
750
751 static int b44_rx(struct b44 *bp, int budget)
752 {
753         int received;
754         u32 cons, prod;
755
756         received = 0;
757         prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
758         prod /= sizeof(struct dma_desc);
759         cons = bp->rx_cons;
760
761         while (cons != prod && budget > 0) {
762                 struct ring_info *rp = &bp->rx_buffers[cons];
763                 struct sk_buff *skb = rp->skb;
764                 dma_addr_t map = rp->mapping;
765                 struct rx_header *rh;
766                 u16 len;
767
768                 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
769                                         RX_PKT_BUF_SZ,
770                                         DMA_FROM_DEVICE);
771                 rh = (struct rx_header *) skb->data;
772                 len = le16_to_cpu(rh->len);
773                 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
774                     (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
775                 drop_it:
776                         b44_recycle_rx(bp, cons, bp->rx_prod);
777                 drop_it_no_recycle:
778                         bp->dev->stats.rx_dropped++;
779                         goto next_pkt;
780                 }
781
782                 if (len == 0) {
783                         int i = 0;
784
785                         do {
786                                 udelay(2);
787                                 barrier();
788                                 len = le16_to_cpu(rh->len);
789                         } while (len == 0 && i++ < 5);
790                         if (len == 0)
791                                 goto drop_it;
792                 }
793
794                 /* Omit CRC. */
795                 len -= 4;
796
797                 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
798                         int skb_size;
799                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
800                         if (skb_size < 0)
801                                 goto drop_it;
802                         dma_unmap_single(bp->sdev->dma_dev, map,
803                                          skb_size, DMA_FROM_DEVICE);
804                         /* Leave out rx_header */
805                         skb_put(skb, len + RX_PKT_OFFSET);
806                         skb_pull(skb, RX_PKT_OFFSET);
807                 } else {
808                         struct sk_buff *copy_skb;
809
810                         b44_recycle_rx(bp, cons, bp->rx_prod);
811                         copy_skb = netdev_alloc_skb(bp->dev, len + 2);
812                         if (copy_skb == NULL)
813                                 goto drop_it_no_recycle;
814
815                         skb_reserve(copy_skb, 2);
816                         skb_put(copy_skb, len);
817                         /* DMA sync done above, copy just the actual packet */
818                         skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
819                                                          copy_skb->data, len);
820                         skb = copy_skb;
821                 }
822                 skb->ip_summed = CHECKSUM_NONE;
823                 skb->protocol = eth_type_trans(skb, bp->dev);
824                 netif_receive_skb(skb);
825                 received++;
826                 budget--;
827         next_pkt:
828                 bp->rx_prod = (bp->rx_prod + 1) &
829                         (B44_RX_RING_SIZE - 1);
830                 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
831         }
832
833         bp->rx_cons = cons;
834         bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
835
836         return received;
837 }
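
/* Editor's summary of the copybreak policy above: frames longer than
 * RX_COPY_THRESHOLD keep their original buffer and a replacement is
 * allocated with b44_alloc_rx_skb(); shorter frames are copied into a
 * fresh skb and the original buffer is recycled in place with
 * b44_recycle_rx().  Once force_copybreak is set (a buffer had to come
 * from GFP_DMA), the copy path is always taken, presumably to avoid
 * repeatedly allocating buffers that fail the 30-bit address check.
 */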
838
839 static int b44_poll(struct napi_struct *napi, int budget)
840 {
841         struct b44 *bp = container_of(napi, struct b44, napi);
842         int work_done;
843         unsigned long flags;
844
845         spin_lock_irqsave(&bp->lock, flags);
846
847         if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
848                 /* spin_lock(&bp->tx_lock); */
849                 b44_tx(bp);
850                 /* spin_unlock(&bp->tx_lock); */
851         }
852         spin_unlock_irqrestore(&bp->lock, flags);
853
854         work_done = 0;
855         if (bp->istat & ISTAT_RX)
856                 work_done += b44_rx(bp, budget);
857
858         if (bp->istat & ISTAT_ERRORS) {
859                 spin_lock_irqsave(&bp->lock, flags);
860                 b44_halt(bp);
861                 b44_init_rings(bp);
862                 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
863                 netif_wake_queue(bp->dev);
864                 spin_unlock_irqrestore(&bp->lock, flags);
865                 work_done = 0;
866         }
867
868         if (work_done < budget) {
869                 napi_complete(napi);
870                 b44_enable_ints(bp);
871         }
872
873         return work_done;
874 }
875
876 static irqreturn_t b44_interrupt(int irq, void *dev_id)
877 {
878         struct net_device *dev = dev_id;
879         struct b44 *bp = netdev_priv(dev);
880         u32 istat, imask;
881         int handled = 0;
882
883         spin_lock(&bp->lock);
884
885         istat = br32(bp, B44_ISTAT);
886         imask = br32(bp, B44_IMASK);
887
888         /* The interrupt mask register controls which interrupt bits
889          * will actually raise an interrupt to the CPU when set by hw/firmware,
890          * but it does not clear the latched status bits themselves.
891          */
892         istat &= imask;
893         if (istat) {
894                 handled = 1;
895
896                 if (unlikely(!netif_running(dev))) {
897                         netdev_info(dev, "late interrupt\n");
898                         goto irq_ack;
899                 }
900
901                 if (napi_schedule_prep(&bp->napi)) {
902                         /* NOTE: These writes are posted by the readback of
903                          *       the ISTAT register below.
904                          */
905                         bp->istat = istat;
906                         __b44_disable_ints(bp);
907                         __napi_schedule(&bp->napi);
908                 }
909
910 irq_ack:
911                 bw32(bp, B44_ISTAT, istat);
912                 br32(bp, B44_ISTAT);
913         }
914         spin_unlock(&bp->lock);
915         return IRQ_RETVAL(handled);
916 }
917
918 static void b44_tx_timeout(struct net_device *dev)
919 {
920         struct b44 *bp = netdev_priv(dev);
921
922         netdev_err(dev, "transmit timed out, resetting\n");
923
924         spin_lock_irq(&bp->lock);
925
926         b44_halt(bp);
927         b44_init_rings(bp);
928         b44_init_hw(bp, B44_FULL_RESET);
929
930         spin_unlock_irq(&bp->lock);
931
932         b44_enable_ints(bp);
933
934         netif_wake_queue(dev);
935 }
936
937 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
938 {
939         struct b44 *bp = netdev_priv(dev);
940         int rc = NETDEV_TX_OK;
941         dma_addr_t mapping;
942         u32 len, entry, ctrl;
943         unsigned long flags;
944
945         len = skb->len;
946         spin_lock_irqsave(&bp->lock, flags);
947
948         /* This is a hard error, log it. */
949         if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
950                 netif_stop_queue(dev);
951                 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
952                 goto err_out;
953         }
954
955         mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
956         if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
957                 struct sk_buff *bounce_skb;
958
959                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
960                 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
961                         dma_unmap_single(bp->sdev->dma_dev, mapping, len,
962                                              DMA_TO_DEVICE);
963
964                 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
965                 if (!bounce_skb)
966                         goto err_out;
967
968                 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
969                                          len, DMA_TO_DEVICE);
970                 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
971                         if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
972                                 dma_unmap_single(bp->sdev->dma_dev, mapping,
973                                                      len, DMA_TO_DEVICE);
974                         dev_kfree_skb_any(bounce_skb);
975                         goto err_out;
976                 }
977
978                 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
979                 dev_kfree_skb_any(skb);
980                 skb = bounce_skb;
981         }
982
983         entry = bp->tx_prod;
984         bp->tx_buffers[entry].skb = skb;
985         bp->tx_buffers[entry].mapping = mapping;
986
987         ctrl  = (len & DESC_CTRL_LEN);
988         ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
989         if (entry == (B44_TX_RING_SIZE - 1))
990                 ctrl |= DESC_CTRL_EOT;
991
992         bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
993         bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
994
995         if (bp->flags & B44_FLAG_TX_RING_HACK)
996                 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
997                                             entry * sizeof(bp->tx_ring[0]),
998                                             DMA_TO_DEVICE);
999
1000         entry = NEXT_TX(entry);
1001
1002         bp->tx_prod = entry;
1003
1004         wmb();
1005
1006         bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1007         if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1008                 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1009         if (bp->flags & B44_FLAG_REORDER_BUG)
1010                 br32(bp, B44_DMATX_PTR);
1011
1012         if (TX_BUFFS_AVAIL(bp) < 1)
1013                 netif_stop_queue(dev);
1014
1015 out_unlock:
1016         spin_unlock_irqrestore(&bp->lock, flags);
1017
1018         return rc;
1019
1020 err_out:
1021         rc = NETDEV_TX_BUSY;
1022         goto out_unlock;
1023 }
1024
1025 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1026 {
1027         struct b44 *bp = netdev_priv(dev);
1028
1029         if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1030                 return -EINVAL;
1031
1032         if (!netif_running(dev)) {
1033                 /* We'll just catch it later when the
1034                  * device is brought up.
1035                  */
1036                 dev->mtu = new_mtu;
1037                 return 0;
1038         }
1039
1040         spin_lock_irq(&bp->lock);
1041         b44_halt(bp);
1042         dev->mtu = new_mtu;
1043         b44_init_rings(bp);
1044         b44_init_hw(bp, B44_FULL_RESET);
1045         spin_unlock_irq(&bp->lock);
1046
1047         b44_enable_ints(bp);
1048
1049         return 0;
1050 }
1051
1052 /* Free up pending packets in all rx/tx rings.
1053  *
1054  * The chip has been shut down and the driver detached from
1055  * the networking, so no interrupts or new tx packets will
1056  * end up in the driver.  bp->lock is not held and we are not
1057  * in an interrupt context and thus may sleep.
1058  */
1059 static void b44_free_rings(struct b44 *bp)
1060 {
1061         struct ring_info *rp;
1062         int i;
1063
1064         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1065                 rp = &bp->rx_buffers[i];
1066
1067                 if (rp->skb == NULL)
1068                         continue;
1069                 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1070                                  DMA_FROM_DEVICE);
1071                 dev_kfree_skb_any(rp->skb);
1072                 rp->skb = NULL;
1073         }
1074
1075         /* XXX needs changes once NETIF_F_SG is set... */
1076         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1077                 rp = &bp->tx_buffers[i];
1078
1079                 if (rp->skb == NULL)
1080                         continue;
1081                 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1082                                  DMA_TO_DEVICE);
1083                 dev_kfree_skb_any(rp->skb);
1084                 rp->skb = NULL;
1085         }
1086 }
1087
1088 /* Initialize tx/rx rings for packet processing.
1089  *
1090  * The chip has been shut down and the driver detached from
1091  * the networking, so no interrupts or new tx packets will
1092  * end up in the driver.
1093  */
1094 static void b44_init_rings(struct b44 *bp)
1095 {
1096         int i;
1097
1098         b44_free_rings(bp);
1099
1100         memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1101         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1102
1103         if (bp->flags & B44_FLAG_RX_RING_HACK)
1104                 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1105                                            DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1106
1107         if (bp->flags & B44_FLAG_TX_RING_HACK)
1108                 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1109                                            DMA_TABLE_BYTES, DMA_TO_DEVICE);
1110
1111         for (i = 0; i < bp->rx_pending; i++) {
1112                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1113                         break;
1114         }
1115 }
1116
1117 /*
1118  * Must not be invoked with interrupt sources disabled and
1119  * the hardware shut down.
1120  */
1121 static void b44_free_consistent(struct b44 *bp)
1122 {
1123         kfree(bp->rx_buffers);
1124         bp->rx_buffers = NULL;
1125         kfree(bp->tx_buffers);
1126         bp->tx_buffers = NULL;
1127         if (bp->rx_ring) {
1128                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1129                         dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1130                                          DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1131                         kfree(bp->rx_ring);
1132                 } else
1133                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1134                                           bp->rx_ring, bp->rx_ring_dma);
1135                 bp->rx_ring = NULL;
1136                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1137         }
1138         if (bp->tx_ring) {
1139                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1140                         dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1141                                          DMA_TABLE_BYTES, DMA_TO_DEVICE);
1142                         kfree(bp->tx_ring);
1143                 } else
1144                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1145                                           bp->tx_ring, bp->tx_ring_dma);
1146                 bp->tx_ring = NULL;
1147                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1148         }
1149 }
1150
1151 /*
1152  * Must not be invoked with interrupt sources disabled and
1153  * the hardware shut down.  Can sleep.
1154  */
1155 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1156 {
1157         int size;
1158
1159         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1160         bp->rx_buffers = kzalloc(size, gfp);
1161         if (!bp->rx_buffers)
1162                 goto out_err;
1163
1164         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1165         bp->tx_buffers = kzalloc(size, gfp);
1166         if (!bp->tx_buffers)
1167                 goto out_err;
1168
1169         size = DMA_TABLE_BYTES;
1170         bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1171                                          &bp->rx_ring_dma, gfp);
1172         if (!bp->rx_ring) {
1173                 /* Allocation may have failed due to dma_alloc_coherent
1174                    insisting on use of GFP_DMA, which is more restrictive
1175                    than necessary...  */
1176                 struct dma_desc *rx_ring;
1177                 dma_addr_t rx_ring_dma;
1178
1179                 rx_ring = kzalloc(size, gfp);
1180                 if (!rx_ring)
1181                         goto out_err;
1182
1183                 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1184                                              DMA_TABLE_BYTES,
1185                                              DMA_BIDIRECTIONAL);
1186
1187                 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1188                         rx_ring_dma + size > DMA_BIT_MASK(30)) {
1189                         kfree(rx_ring);
1190                         goto out_err;
1191                 }
1192
1193                 bp->rx_ring = rx_ring;
1194                 bp->rx_ring_dma = rx_ring_dma;
1195                 bp->flags |= B44_FLAG_RX_RING_HACK;
1196         }
1197
1198         bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1199                                          &bp->tx_ring_dma, gfp);
1200         if (!bp->tx_ring) {
1201                 /* Allocation may have failed due to dma_alloc_coherent
1202                    insisting on use of GFP_DMA, which is more restrictive
1203                    than necessary...  */
1204                 struct dma_desc *tx_ring;
1205                 dma_addr_t tx_ring_dma;
1206
1207                 tx_ring = kzalloc(size, gfp);
1208                 if (!tx_ring)
1209                         goto out_err;
1210
1211                 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1212                                              DMA_TABLE_BYTES,
1213                                              DMA_TO_DEVICE);
1214
1215                 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1216                         tx_ring_dma + size > DMA_BIT_MASK(30)) {
1217                         kfree(tx_ring);
1218                         goto out_err;
1219                 }
1220
1221                 bp->tx_ring = tx_ring;
1222                 bp->tx_ring_dma = tx_ring_dma;
1223                 bp->flags |= B44_FLAG_TX_RING_HACK;
1224         }
1225
1226         return 0;
1227
1228 out_err:
1229         b44_free_consistent(bp);
1230         return -ENOMEM;
1231 }
1232
1233 /* bp->lock is held. */
1234 static void b44_clear_stats(struct b44 *bp)
1235 {
1236         unsigned long reg;
1237
1238         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1239         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1240                 br32(bp, reg);
1241         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1242                 br32(bp, reg);
1243 }
1244
1245 /* bp->lock is held. */
1246 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1247 {
1248         struct ssb_device *sdev = bp->sdev;
1249         bool was_enabled;
1250
1251         was_enabled = ssb_device_is_enabled(bp->sdev);
1252
1253         ssb_device_enable(bp->sdev, 0);
1254         ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1255
1256         if (was_enabled) {
1257                 bw32(bp, B44_RCV_LAZY, 0);
1258                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1259                 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1260                 bw32(bp, B44_DMATX_CTRL, 0);
1261                 bp->tx_prod = bp->tx_cons = 0;
1262                 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1263                         b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1264                                      100, 0);
1265                 }
1266                 bw32(bp, B44_DMARX_CTRL, 0);
1267                 bp->rx_prod = bp->rx_cons = 0;
1268         }
1269
1270         b44_clear_stats(bp);
1271
1272         /*
1273          * Don't enable the PHY if we are doing a partial reset;
1274          * we are probably going to power it down anyway.
1275          */
1276         if (reset_kind == B44_CHIP_RESET_PARTIAL)
1277                 return;
1278
1279         switch (sdev->bus->bustype) {
1280         case SSB_BUSTYPE_SSB:
1281                 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1282                      (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1283                                         B44_MDC_RATIO)
1284                      & MDIO_CTRL_MAXF_MASK)));
1285                 break;
1286         case SSB_BUSTYPE_PCI:
1287                 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1288                      (0x0d & MDIO_CTRL_MAXF_MASK)));
1289                 break;
1290         case SSB_BUSTYPE_PCMCIA:
1291         case SSB_BUSTYPE_SDIO:
1292                 WARN_ON(1); /* A device with this bus does not exist. */
1293                 break;
1294         }
1295
1296         br32(bp, B44_MDIO_CTRL);
1297
1298         if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1299                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1300                 br32(bp, B44_ENET_CTRL);
1301                 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1302         } else {
1303                 u32 val = br32(bp, B44_DEVCTRL);
1304
1305                 if (val & DEVCTRL_EPR) {
1306                         bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1307                         br32(bp, B44_DEVCTRL);
1308                         udelay(100);
1309                 }
1310                 bp->flags |= B44_FLAG_INTERNAL_PHY;
1311         }
1312 }
1313
1314 /* bp->lock is held. */
1315 static void b44_halt(struct b44 *bp)
1316 {
1317         b44_disable_ints(bp);
1318         /* reset PHY */
1319         b44_phy_reset(bp);
1320         /* power down PHY */
1321         netdev_info(bp->dev, "powering down PHY\n");
1322         bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1323         /* now reset the chip, but without enabling the MAC&PHY
1324          * part of it. This has to be done _after_ we shut down the PHY */
1325         b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1326 }
1327
1328 /* bp->lock is held. */
1329 static void __b44_set_mac_addr(struct b44 *bp)
1330 {
1331         bw32(bp, B44_CAM_CTRL, 0);
1332         if (!(bp->dev->flags & IFF_PROMISC)) {
1333                 u32 val;
1334
1335                 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1336                 val = br32(bp, B44_CAM_CTRL);
1337                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1338         }
1339 }
1340
1341 static int b44_set_mac_addr(struct net_device *dev, void *p)
1342 {
1343         struct b44 *bp = netdev_priv(dev);
1344         struct sockaddr *addr = p;
1345         u32 val;
1346
1347         if (netif_running(dev))
1348                 return -EBUSY;
1349
1350         if (!is_valid_ether_addr(addr->sa_data))
1351                 return -EINVAL;
1352
1353         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1354
1355         spin_lock_irq(&bp->lock);
1356
1357         val = br32(bp, B44_RXCONFIG);
1358         if (!(val & RXCONFIG_CAM_ABSENT))
1359                 __b44_set_mac_addr(bp);
1360
1361         spin_unlock_irq(&bp->lock);
1362
1363         return 0;
1364 }
1365
1366 /* Called at device open time to get the chip ready for
1367  * packet processing.  Invoked with bp->lock held.
1368  */
1369 static void __b44_set_rx_mode(struct net_device *);
1370 static void b44_init_hw(struct b44 *bp, int reset_kind)
1371 {
1372         u32 val;
1373
1374         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1375         if (reset_kind == B44_FULL_RESET) {
1376                 b44_phy_reset(bp);
1377                 b44_setup_phy(bp);
1378         }
1379
1380         /* Enable CRC32, set proper LED modes and power on PHY */
1381         bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1382         bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1383
1384         /* This sets the MAC address too.  */
1385         __b44_set_rx_mode(bp->dev);
1386
1387         /* MTU + eth header + possible VLAN tag + struct rx_header */
1388         bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1389         bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1390
1391         bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1392         if (reset_kind == B44_PARTIAL_RESET) {
1393                 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1394                                       (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1395         } else {
1396                 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1397                 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1398                 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1399                                       (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1400                 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1401
1402                 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1403                 bp->rx_prod = bp->rx_pending;
1404
1405                 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1406         }
1407
1408         val = br32(bp, B44_ENET_CTRL);
1409         bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1410 }
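
/* Editor's worked example for the RXMAXLEN/TXMAXLEN setup above,
 * assuming the extra 8 bytes cover a VLAN tag plus the trailing FCS:
 * with the default 1500-byte MTU and RX_HEADER_LEN = 28, the chip
 * accepts frames of up to 1500 + 14 + 8 + 28 = 1550 bytes including the
 * prepended receive header.
 */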
1411
1412 static int b44_open(struct net_device *dev)
1413 {
1414         struct b44 *bp = netdev_priv(dev);
1415         int err;
1416
1417         err = b44_alloc_consistent(bp, GFP_KERNEL);
1418         if (err)
1419                 goto out;
1420
1421         napi_enable(&bp->napi);
1422
1423         b44_init_rings(bp);
1424         b44_init_hw(bp, B44_FULL_RESET);
1425
1426         b44_check_phy(bp);
1427
1428         err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1429         if (unlikely(err < 0)) {
1430                 napi_disable(&bp->napi);
1431                 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1432                 b44_free_rings(bp);
1433                 b44_free_consistent(bp);
1434                 goto out;
1435         }
1436
1437         init_timer(&bp->timer);
1438         bp->timer.expires = jiffies + HZ;
1439         bp->timer.data = (unsigned long) bp;
1440         bp->timer.function = b44_timer;
1441         add_timer(&bp->timer);
1442
1443         b44_enable_ints(bp);
1444         netif_start_queue(dev);
1445 out:
1446         return err;
1447 }
1448
1449 #ifdef CONFIG_NET_POLL_CONTROLLER
1450 /*
1451  * Polling receive - used by netconsole and other diagnostic tools
1452  * to allow network i/o with interrupts disabled.
1453  */
1454 static void b44_poll_controller(struct net_device *dev)
1455 {
1456         disable_irq(dev->irq);
1457         b44_interrupt(dev->irq, dev);
1458         enable_irq(dev->irq);
1459 }
1460 #endif
1461
1462 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1463 {
1464         u32 i;
1465         u32 *pattern = (u32 *) pp;
1466
1467         for (i = 0; i < bytes; i += sizeof(u32)) {
1468                 bw32(bp, B44_FILT_ADDR, table_offset + i);
1469                 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1470         }
1471 }
1472
1473 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1474 {
1475         int magicsync = 6;
1476         int k, j, len = offset;
1477         int ethaddr_bytes = ETH_ALEN;
1478
1479         memset(ppattern + offset, 0xff, magicsync);
1480         for (j = 0; j < magicsync; j++)
1481                 set_bit(len++, (unsigned long *) pmask);
1482
1483         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1484                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1485                         ethaddr_bytes = ETH_ALEN;
1486                 else
1487                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1488                 if (ethaddr_bytes <= 0)
1489                         break;
1490                 for (k = 0; k < ethaddr_bytes; k++) {
1491                         ppattern[offset + magicsync +
1492                                 (j * ETH_ALEN) + k] = macaddr[k];
1493                         set_bit(len++, (unsigned long *) pmask);
1494                 }
1495         }
1496         return len - 1;
1497 }
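
/* Editor's illustration of the classic magic-packet body the code above
 * encodes (standard Wake-on-LAN format, not from the original source):
 *
 *	ff ff ff ff ff ff            6 bytes of sync
 *	<MAC> <MAC> ... <MAC>        station address repeated up to 16
 *	                             times (truncated to fit within
 *	                             B44_PATTERN_SIZE)
 *
 * Every byte that must match gets its bit set in pmask; the returned
 * value (index of the last significant byte) is what gets programmed
 * into B44_WKUP_LEN as "length - 1".
 */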
1498
1499 /* Setup magic packet patterns in the b44 WOL
1500  * pattern matching filter.
1501  */
1502 static void b44_setup_pseudo_magicp(struct b44 *bp)
1503 {
1505         u32 val;
1506         int plen0, plen1, plen2;
1507         u8 *pwol_pattern;
1508         u8 pwol_mask[B44_PMASK_SIZE];
1509
1510         pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1511         if (!pwol_pattern) {
1512                 pr_err("Memory not available for WOL\n");
1513                 return;
1514         }
1515
1516         /* Ipv4 magic packet pattern - pattern 0.*/
1517         memset(pwol_mask, 0, B44_PMASK_SIZE);
1518         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1519                                   B44_ETHIPV4UDP_HLEN);
1520
1521         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1522         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1523
1524         /* Raw ethernet II magic packet pattern - pattern 1 */
1525         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1526         memset(pwol_mask, 0, B44_PMASK_SIZE);
1527         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1528                                   ETH_HLEN);
1529
1530         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1531                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1532         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1533                        B44_PMASK_BASE + B44_PMASK_SIZE);
1534
1535         /* IPv6 magic packet pattern - pattern 2 */
1536         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1537         memset(pwol_mask, 0, B44_PMASK_SIZE);
1538         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1539                                   B44_ETHIPV6UDP_HLEN);
1540
1541         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1542                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1543         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1544                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1545
1546         kfree(pwol_pattern);
1547
1548         /* Set these patterns' lengths: one less than each real length */
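        /* (one byte per pattern: plen0 in bits 7:0, plen1 in bits 15:8,
         *  plen2 in bits 23:16; WKUP_LEN_ENABLE_THREE arms all three) */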
1549         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1550         bw32(bp, B44_WKUP_LEN, val);
1551
1552         /* enable wakeup pattern matching */
1553         val = br32(bp, B44_DEVCTRL);
1554         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1555
1556 }
1557
1558 #ifdef CONFIG_B44_PCI
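/* On PCI hosts the wakeup must also be able to reach the host: set the
 * power/PME enable bits in the core control word and in the PCI
 * power-management registers.
 */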
1559 static void b44_setup_wol_pci(struct b44 *bp)
1560 {
1561         u16 val;
1562
1563         if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1564                 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1565                 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1566                 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1567         }
1568 }
1569 #else
1570 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1571 #endif /* CONFIG_B44_PCI */
1572
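/* Arm Wake-on-LAN.  B0 and later cores have a native magic-packet mode
 * (DEVCTRL_MPM) keyed on the station address; older cores emulate it
 * with the pseudo-magic pattern filter set up above.
 */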
1573 static void b44_setup_wol(struct b44 *bp)
1574 {
1575         u32 val;
1576
1577         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1578
1579         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1580
1581                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1582
1583                 val = bp->dev->dev_addr[2] << 24 |
1584                         bp->dev->dev_addr[3] << 16 |
1585                         bp->dev->dev_addr[4] << 8 |
1586                         bp->dev->dev_addr[5];
1587                 bw32(bp, B44_ADDR_LO, val);
1588
1589                 val = bp->dev->dev_addr[0] << 8 |
1590                         bp->dev->dev_addr[1];
1591                 bw32(bp, B44_ADDR_HI, val);
1592
1593                 val = br32(bp, B44_DEVCTRL);
1594                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1595
1596         } else {
1597                 b44_setup_pseudo_magicp(bp);
1598         }
1599         b44_setup_wol_pci(bp);
1600 }
1601
1602 static int b44_close(struct net_device *dev)
1603 {
1604         struct b44 *bp = netdev_priv(dev);
1605
1606         netif_stop_queue(dev);
1607
1608         napi_disable(&bp->napi);
1609
1610         del_timer_sync(&bp->timer);
1611
1612         spin_lock_irq(&bp->lock);
1613
1614         b44_halt(bp);
1615         b44_free_rings(bp);
1616         netif_carrier_off(dev);
1617
1618         spin_unlock_irq(&bp->lock);
1619
1620         free_irq(dev->irq, dev);
1621
1622         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1623                 b44_init_hw(bp, B44_PARTIAL_RESET);
1624                 b44_setup_wol(bp);
1625         }
1626
1627         b44_free_consistent(bp);
1628
1629         return 0;
1630 }
1631
1632 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1633 {
1634         struct b44 *bp = netdev_priv(dev);
1635         struct net_device_stats *nstat = &dev->stats;
1636         struct b44_hw_stats *hwstat = &bp->hw_stats;
1637
1638         /* Convert HW stats into netdevice stats. */
1639         nstat->rx_packets = hwstat->rx_pkts;
1640         nstat->tx_packets = hwstat->tx_pkts;
1641         nstat->rx_bytes   = hwstat->rx_octets;
1642         nstat->tx_bytes   = hwstat->tx_octets;
1643         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1644                              hwstat->tx_oversize_pkts +
1645                              hwstat->tx_underruns +
1646                              hwstat->tx_excessive_cols +
1647                              hwstat->tx_late_cols);
1648         nstat->multicast  = hwstat->rx_multicast_pkts;
1649         nstat->collisions = hwstat->tx_total_cols;
1650
1651         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1652                                    hwstat->rx_undersize);
1653         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1654         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1655         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1656         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1657                                    hwstat->rx_oversize_pkts +
1658                                    hwstat->rx_missed_pkts +
1659                                    hwstat->rx_crc_align_errs +
1660                                    hwstat->rx_undersize +
1661                                    hwstat->rx_crc_errs +
1662                                    hwstat->rx_align_errs +
1663                                    hwstat->rx_symbol_errs);
1664
1665         nstat->tx_aborted_errors = hwstat->tx_underruns;
1666 #if 0
1667         /* Carrier lost counter seems to be broken for some devices */
1668         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1669 #endif
1670
1671         return nstat;
1672 }
1673
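/* Load multicast addresses into the CAM.  Slot 0 holds the unicast MAC
 * (written by __b44_set_mac_addr()), so multicast entries start at
 * slot 1.  Returns the first CAM index the caller still has to clear.
 */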
1674 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1675 {
1676         struct netdev_hw_addr *ha;
1677         int i, num_ents;
1678
1679         num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1680         i = 0;
1681         netdev_for_each_mc_addr(ha, dev) {
1682                 if (i == num_ents)
1683                         break;
1684                 __b44_cam_write(bp, ha->addr, i++ + 1);
1685         }
1686         return i + 1;
1687 }
1688
1689 static void __b44_set_rx_mode(struct net_device *dev)
1690 {
1691         struct b44 *bp = netdev_priv(dev);
1692         u32 val;
1693
1694         val = br32(bp, B44_RXCONFIG);
1695         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
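        /* A core with no CAM cannot filter addresses, so fall back to
         * promiscuous mode. */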
1696         if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1697                 val |= RXCONFIG_PROMISC;
1698                 bw32(bp, B44_RXCONFIG, val);
1699         } else {
1700                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1701                 int i = 1;
1702
1703                 __b44_set_mac_addr(bp);
1704
1705                 if ((dev->flags & IFF_ALLMULTI) ||
1706                     (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1707                         val |= RXCONFIG_ALLMULTI;
1708                 else
1709                         i = __b44_load_mcast(bp, dev);
1710
1711                 for (; i < 64; i++)
1712                         __b44_cam_write(bp, zero, i);
1713
1714                 bw32(bp, B44_RXCONFIG, val);
1715                 val = br32(bp, B44_CAM_CTRL);
1716                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1717         }
1718 }
1719
1720 static void b44_set_rx_mode(struct net_device *dev)
1721 {
1722         struct b44 *bp = netdev_priv(dev);
1723
1724         spin_lock_irq(&bp->lock);
1725         __b44_set_rx_mode(dev);
1726         spin_unlock_irq(&bp->lock);
1727 }
1728
1729 static u32 b44_get_msglevel(struct net_device *dev)
1730 {
1731         struct b44 *bp = netdev_priv(dev);
1732         return bp->msg_enable;
1733 }
1734
1735 static void b44_set_msglevel(struct net_device *dev, u32 value)
1736 {
1737         struct b44 *bp = netdev_priv(dev);
1738         bp->msg_enable = value;
1739 }
1740
1741 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1742 {
1743         struct b44 *bp = netdev_priv(dev);
1744         struct ssb_bus *bus = bp->sdev->bus;
1745
1746         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1747         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1748         switch (bus->bustype) {
1749         case SSB_BUSTYPE_PCI:
1750                 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1751                 break;
1752         case SSB_BUSTYPE_SSB:
1753                 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1754                 break;
1755         case SSB_BUSTYPE_PCMCIA:
1756         case SSB_BUSTYPE_SDIO:
1757                 WARN_ON(1); /* A device with this bus does not exist. */
1758                 break;
1759         }
1760 }
1761
1762 static int b44_nway_reset(struct net_device *dev)
1763 {
1764         struct b44 *bp = netdev_priv(dev);
1765         u32 bmcr;
1766         int r;
1767
1768         spin_lock_irq(&bp->lock);
1769         b44_readphy(bp, MII_BMCR, &bmcr);
1770         b44_readphy(bp, MII_BMCR, &bmcr);
1771         r = -EINVAL;
1772         if (bmcr & BMCR_ANENABLE) {
1773                 b44_writephy(bp, MII_BMCR,
1774                              bmcr | BMCR_ANRESTART);
1775                 r = 0;
1776         }
1777         spin_unlock_irq(&bp->lock);
1778
1779         return r;
1780 }
1781
1782 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1783 {
1784         struct b44 *bp = netdev_priv(dev);
1785
1786         cmd->supported = (SUPPORTED_Autoneg);
1787         cmd->supported |= (SUPPORTED_100baseT_Half |
1788                           SUPPORTED_100baseT_Full |
1789                           SUPPORTED_10baseT_Half |
1790                           SUPPORTED_10baseT_Full |
1791                           SUPPORTED_MII);
1792
1793         cmd->advertising = 0;
1794         if (bp->flags & B44_FLAG_ADV_10HALF)
1795                 cmd->advertising |= ADVERTISED_10baseT_Half;
1796         if (bp->flags & B44_FLAG_ADV_10FULL)
1797                 cmd->advertising |= ADVERTISED_10baseT_Full;
1798         if (bp->flags & B44_FLAG_ADV_100HALF)
1799                 cmd->advertising |= ADVERTISED_100baseT_Half;
1800         if (bp->flags & B44_FLAG_ADV_100FULL)
1801                 cmd->advertising |= ADVERTISED_100baseT_Full;
1802         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1803         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1804                 SPEED_100 : SPEED_10;
1805         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1806                 DUPLEX_FULL : DUPLEX_HALF;
1807         cmd->port = 0;
1808         cmd->phy_address = bp->phy_addr;
1809         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1810                 XCVR_INTERNAL : XCVR_EXTERNAL;
1811         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1812                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1813         if (cmd->autoneg == AUTONEG_ENABLE)
1814                 cmd->advertising |= ADVERTISED_Autoneg;
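        /* Speed and duplex are unknown while the interface is down. */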
1815         if (!netif_running(dev)) {
1816                 cmd->speed = 0;
1817                 cmd->duplex = 0xff;
1818         }
1819         cmd->maxtxpkt = 0;
1820         cmd->maxrxpkt = 0;
1821         return 0;
1822 }
1823
1824 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1825 {
1826         struct b44 *bp = netdev_priv(dev);
1827
1828         /* We do not support gigabit. */
1829         if (cmd->autoneg == AUTONEG_ENABLE) {
1830                 if (cmd->advertising &
1831                     (ADVERTISED_1000baseT_Half |
1832                      ADVERTISED_1000baseT_Full))
1833                         return -EINVAL;
1834         } else if ((cmd->speed != SPEED_100 &&
1835                     cmd->speed != SPEED_10) ||
1836                    (cmd->duplex != DUPLEX_HALF &&
1837                     cmd->duplex != DUPLEX_FULL)) {
1838                 return -EINVAL;
1839         }
1840
1841         spin_lock_irq(&bp->lock);
1842
1843         if (cmd->autoneg == AUTONEG_ENABLE) {
1844                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1845                                B44_FLAG_100_BASE_T |
1846                                B44_FLAG_FULL_DUPLEX |
1847                                B44_FLAG_ADV_10HALF |
1848                                B44_FLAG_ADV_10FULL |
1849                                B44_FLAG_ADV_100HALF |
1850                                B44_FLAG_ADV_100FULL);
1851                 if (cmd->advertising == 0) {
1852                         bp->flags |= (B44_FLAG_ADV_10HALF |
1853                                       B44_FLAG_ADV_10FULL |
1854                                       B44_FLAG_ADV_100HALF |
1855                                       B44_FLAG_ADV_100FULL);
1856                 } else {
1857                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1858                                 bp->flags |= B44_FLAG_ADV_10HALF;
1859                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1860                                 bp->flags |= B44_FLAG_ADV_10FULL;
1861                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1862                                 bp->flags |= B44_FLAG_ADV_100HALF;
1863                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1864                                 bp->flags |= B44_FLAG_ADV_100FULL;
1865                 }
1866         } else {
1867                 bp->flags |= B44_FLAG_FORCE_LINK;
1868                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1869                 if (cmd->speed == SPEED_100)
1870                         bp->flags |= B44_FLAG_100_BASE_T;
1871                 if (cmd->duplex == DUPLEX_FULL)
1872                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1873         }
1874
1875         if (netif_running(dev))
1876                 b44_setup_phy(bp);
1877
1878         spin_unlock_irq(&bp->lock);
1879
1880         return 0;
1881 }
1882
1883 static void b44_get_ringparam(struct net_device *dev,
1884                               struct ethtool_ringparam *ering)
1885 {
1886         struct b44 *bp = netdev_priv(dev);
1887
1888         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1889         ering->rx_pending = bp->rx_pending;
1890
1891         /* XXX ethtool lacks a tx_max_pending, oops... */
1892 }
1893
1894 static int b44_set_ringparam(struct net_device *dev,
1895                              struct ethtool_ringparam *ering)
1896 {
1897         struct b44 *bp = netdev_priv(dev);
1898
1899         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1900             (ering->rx_mini_pending != 0) ||
1901             (ering->rx_jumbo_pending != 0) ||
1902             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1903                 return -EINVAL;
1904
1905         spin_lock_irq(&bp->lock);
1906
1907         bp->rx_pending = ering->rx_pending;
1908         bp->tx_pending = ering->tx_pending;
1909
1910         b44_halt(bp);
1911         b44_init_rings(bp);
1912         b44_init_hw(bp, B44_FULL_RESET);
1913         netif_wake_queue(bp->dev);
1914         spin_unlock_irq(&bp->lock);
1915
1916         b44_enable_ints(bp);
1917
1918         return 0;
1919 }
1920
1921 static void b44_get_pauseparam(struct net_device *dev,
1922                                 struct ethtool_pauseparam *epause)
1923 {
1924         struct b44 *bp = netdev_priv(dev);
1925
1926         epause->autoneg =
1927                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1928         epause->rx_pause =
1929                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1930         epause->tx_pause =
1931                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1932 }
1933
1934 static int b44_set_pauseparam(struct net_device *dev,
1935                                 struct ethtool_pauseparam *epause)
1936 {
1937         struct b44 *bp = netdev_priv(dev);
1938
1939         spin_lock_irq(&bp->lock);
1940         if (epause->autoneg)
1941                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1942         else
1943                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1944         if (epause->rx_pause)
1945                 bp->flags |= B44_FLAG_RX_PAUSE;
1946         else
1947                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1948         if (epause->tx_pause)
1949                 bp->flags |= B44_FLAG_TX_PAUSE;
1950         else
1951                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1952         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1953                 b44_halt(bp);
1954                 b44_init_rings(bp);
1955                 b44_init_hw(bp, B44_FULL_RESET);
1956         } else {
1957                 __b44_set_flow_ctrl(bp, bp->flags);
1958         }
1959         spin_unlock_irq(&bp->lock);
1960
1961         b44_enable_ints(bp);
1962
1963         return 0;
1964 }
1965
1966 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1967 {
1968         switch (stringset) {
1969         case ETH_SS_STATS:
1970                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1971                 break;
1972         }
1973 }
1974
1975 static int b44_get_sset_count(struct net_device *dev, int sset)
1976 {
1977         switch (sset) {
1978         case ETH_SS_STATS:
1979                 return ARRAY_SIZE(b44_gstrings);
1980         default:
1981                 return -EOPNOTSUPP;
1982         }
1983 }
1984
1985 static void b44_get_ethtool_stats(struct net_device *dev,
1986                                   struct ethtool_stats *stats, u64 *data)
1987 {
1988         struct b44 *bp = netdev_priv(dev);
1989         u32 *val = &bp->hw_stats.tx_good_octets;
1990         u32 i;
1991
1992         spin_lock_irq(&bp->lock);
1993
1994         b44_stats_update(bp);
1995
1996         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1997                 *data++ = *val++;
1998
1999         spin_unlock_irq(&bp->lock);
2000 }
2001
2002 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2003 {
2004         struct b44 *bp = netdev_priv(dev);
2005
2006         wol->supported = WAKE_MAGIC;
2007         if (bp->flags & B44_FLAG_WOL_ENABLE)
2008                 wol->wolopts = WAKE_MAGIC;
2009         else
2010                 wol->wolopts = 0;
2011         memset(&wol->sopass, 0, sizeof(wol->sopass));
2012 }
2013
2014 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2015 {
2016         struct b44 *bp = netdev_priv(dev);
2017
2018         spin_lock_irq(&bp->lock);
2019         if (wol->wolopts & WAKE_MAGIC)
2020                 bp->flags |= B44_FLAG_WOL_ENABLE;
2021         else
2022                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2023         spin_unlock_irq(&bp->lock);
2024
2025         return 0;
2026 }
2027
2028 static const struct ethtool_ops b44_ethtool_ops = {
2029         .get_drvinfo            = b44_get_drvinfo,
2030         .get_settings           = b44_get_settings,
2031         .set_settings           = b44_set_settings,
2032         .nway_reset             = b44_nway_reset,
2033         .get_link               = ethtool_op_get_link,
2034         .get_wol                = b44_get_wol,
2035         .set_wol                = b44_set_wol,
2036         .get_ringparam          = b44_get_ringparam,
2037         .set_ringparam          = b44_set_ringparam,
2038         .get_pauseparam         = b44_get_pauseparam,
2039         .set_pauseparam         = b44_set_pauseparam,
2040         .get_msglevel           = b44_get_msglevel,
2041         .set_msglevel           = b44_set_msglevel,
2042         .get_strings            = b44_get_strings,
2043         .get_sset_count         = b44_get_sset_count,
2044         .get_ethtool_stats      = b44_get_ethtool_stats,
2045 };
2046
2047 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2048 {
2049         struct mii_ioctl_data *data = if_mii(ifr);
2050         struct b44 *bp = netdev_priv(dev);
2051         int err = -EINVAL;
2052
2053         if (!netif_running(dev))
2054                 goto out;
2055
2056         spin_lock_irq(&bp->lock);
2057         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2058         spin_unlock_irq(&bp->lock);
2059 out:
2060         return err;
2061 }
2062
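/* Pull the per-board parameters out of the SPROM: station MAC address,
 * PHY address and the DMA translation needed for this core.
 */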
2063 static int __devinit b44_get_invariants(struct b44 *bp)
2064 {
2065         struct ssb_device *sdev = bp->sdev;
2066         int err = 0;
2067         u8 *addr;
2068
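        /* Offset the core must add to DMA addresses for its bus accesses
         * to land in host memory (zero on native SSB buses). */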
2069         bp->dma_offset = ssb_dma_translation(sdev);
2070
2071         if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2072             instance > 1) {
2073                 addr = sdev->bus->sprom.et1mac;
2074                 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2075         } else {
2076                 addr = sdev->bus->sprom.et0mac;
2077                 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2078         }
2079         /* Some ROMs have buggy PHY addresses with the high
2080          * bits set (sign extension?). Truncate them to a
2081          * valid PHY address. */
2082         bp->phy_addr &= 0x1F;
2083
2084         memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2085 
2086         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2087                 pr_err("Invalid MAC address found in EEPROM\n");
2088                 return -EINVAL;
2089         }
2090
2091         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2092
2093         bp->imask = IMASK_DEF;
2094
2095         /* XXX - really required?
2096            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2097         */
2098
2099         if (bp->sdev->id.revision >= 7)
2100                 bp->flags |= B44_FLAG_B0_ANDLATER;
2101
2102         return err;
2103 }
2104
2105 static const struct net_device_ops b44_netdev_ops = {
2106         .ndo_open               = b44_open,
2107         .ndo_stop               = b44_close,
2108         .ndo_start_xmit         = b44_start_xmit,
2109         .ndo_get_stats          = b44_get_stats,
2110         .ndo_set_multicast_list = b44_set_rx_mode,
2111         .ndo_set_mac_address    = b44_set_mac_addr,
2112         .ndo_validate_addr      = eth_validate_addr,
2113         .ndo_do_ioctl           = b44_ioctl,
2114         .ndo_tx_timeout         = b44_tx_timeout,
2115         .ndo_change_mtu         = b44_change_mtu,
2116 #ifdef CONFIG_NET_POLL_CONTROLLER
2117         .ndo_poll_controller    = b44_poll_controller,
2118 #endif
2119 };
2120
2121 static int __devinit b44_init_one(struct ssb_device *sdev,
2122                                   const struct ssb_device_id *ent)
2123 {
2124         static int b44_version_printed = 0;
2125         struct net_device *dev;
2126         struct b44 *bp;
2127         int err;
2128
2129         instance++;
2130
2131         if (b44_version_printed++ == 0)
2132                 pr_info("%s", version);
2133
2134
2135         dev = alloc_etherdev(sizeof(*bp));
2136         if (!dev) {
2137                 dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
2138                 err = -ENOMEM;
2139                 goto out;
2140         }
2141
2142         SET_NETDEV_DEV(dev, sdev->dev);
2143
2144         /* No interesting netdevice features in this card... */
2145         dev->features |= 0;
2146
2147         bp = netdev_priv(dev);
2148         bp->sdev = sdev;
2149         bp->dev = dev;
2150         bp->force_copybreak = 0;
2151
2152         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2153
2154         spin_lock_init(&bp->lock);
2155
2156         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2157         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2158
2159         dev->netdev_ops = &b44_netdev_ops;
2160         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2161         dev->watchdog_timeo = B44_TX_TIMEOUT;
2162         dev->irq = sdev->irq;
2163         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2164
2165         netif_carrier_off(dev);
2166
2167         err = ssb_bus_powerup(sdev->bus, 0);
2168         if (err) {
2169                 dev_err(sdev->dev,
2170                         "Failed to powerup the bus\n");
2171                 goto err_out_free_dev;
2172         }
2173
2174         err = dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ?:
2175               dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30));
2176         if (err) {
2177                 dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n");
2178                 goto err_out_powerdown;
2179         }
2180
2181         err = b44_get_invariants(bp);
2182         if (err) {
2183                 dev_err(sdev->dev,
2184                         "Problem fetching invariants of chip, aborting\n");
2185                 goto err_out_powerdown;
2186         }
2187
2188         bp->mii_if.dev = dev;
2189         bp->mii_if.mdio_read = b44_mii_read;
2190         bp->mii_if.mdio_write = b44_mii_write;
2191         bp->mii_if.phy_id = bp->phy_addr;
2192         bp->mii_if.phy_id_mask = 0x1f;
2193         bp->mii_if.reg_num_mask = 0x1f;
2194
2195         /* By default, advertise all speed/duplex settings. */
2196         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2197                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2198
2199         /* By default, auto-negotiate PAUSE. */
2200         bp->flags |= B44_FLAG_PAUSE_AUTO;
2201
2202         err = register_netdev(dev);
2203         if (err) {
2204                 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2205                 goto err_out_powerdown;
2206         }
2207
2208         ssb_set_drvdata(sdev, dev);
2209
2210         /* Chip reset provides power to the b44 MAC & PCI cores, which
2211          * is necessary for MAC register access.
2212          */
2213         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2214
2215         /* do a phy reset to test if there is an active phy */
2216         if (b44_phy_reset(bp) < 0)
2217                 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2218
2219         netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2220                     dev->dev_addr);
2221
2222         return 0;
2223
2224 err_out_powerdown:
2225         ssb_bus_may_powerdown(sdev->bus);
2226
2227 err_out_free_dev:
2228         free_netdev(dev);
2229
2230 out:
2231         return err;
2232 }
2233
2234 static void __devexit b44_remove_one(struct ssb_device *sdev)
2235 {
2236         struct net_device *dev = ssb_get_drvdata(sdev);
2237
2238         unregister_netdev(dev);
2239         ssb_device_disable(sdev, 0);
2240         ssb_bus_may_powerdown(sdev->bus);
2241         free_netdev(dev);
2242         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2243         ssb_set_drvdata(sdev, NULL);
2244 }
2245
2246 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2247 {
2248         struct net_device *dev = ssb_get_drvdata(sdev);
2249         struct b44 *bp = netdev_priv(dev);
2250
2251         if (!netif_running(dev))
2252                 return 0;
2253
2254         del_timer_sync(&bp->timer);
2255
2256         spin_lock_irq(&bp->lock);
2257
2258         b44_halt(bp);
2259         netif_carrier_off(bp->dev);
2260         netif_device_detach(bp->dev);
2261         b44_free_rings(bp);
2262
2263         spin_unlock_irq(&bp->lock);
2264
2265         free_irq(dev->irq, dev);
2266         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2267                 b44_init_hw(bp, B44_PARTIAL_RESET);
2268                 b44_setup_wol(bp);
2269         }
2270
2271         ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2272         return 0;
2273 }
2274
2275 static int b44_resume(struct ssb_device *sdev)
2276 {
2277         struct net_device *dev = ssb_get_drvdata(sdev);
2278         struct b44 *bp = netdev_priv(dev);
2279         int rc = 0;
2280
2281         rc = ssb_bus_powerup(sdev->bus, 0);
2282         if (rc) {
2283                 dev_err(sdev->dev,
2284                         "Failed to powerup the bus\n");
2285                 return rc;
2286         }
2287
2288         if (!netif_running(dev))
2289                 return 0;
2290
2291         rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2292         if (rc) {
2293                 netdev_err(dev, "request_irq failed\n");
2294                 return rc;
2295         }
2296
2297         spin_lock_irq(&bp->lock);
2298
2299         b44_init_rings(bp);
2300         b44_init_hw(bp, B44_FULL_RESET);
2301         netif_device_attach(bp->dev);
2302         spin_unlock_irq(&bp->lock);
2303
2304         b44_enable_ints(bp);
2305         netif_wake_queue(dev);
2306
2307         mod_timer(&bp->timer, jiffies + 1);
2308
2309         return 0;
2310 }
2311
2312 static struct ssb_driver b44_ssb_driver = {
2313         .name           = DRV_MODULE_NAME,
2314         .id_table       = b44_ssb_tbl,
2315         .probe          = b44_init_one,
2316         .remove         = __devexit_p(b44_remove_one),
2317         .suspend        = b44_suspend,
2318         .resume         = b44_resume,
2319 };
2320
2321 static inline int b44_pci_init(void)
2322 {
2323         int err = 0;
2324 #ifdef CONFIG_B44_PCI
2325         err = ssb_pcihost_register(&b44_pci_driver);
2326 #endif
2327         return err;
2328 }
2329
2330 static inline void b44_pci_exit(void)
2331 {
2332 #ifdef CONFIG_B44_PCI
2333         ssb_pcihost_unregister(&b44_pci_driver);
2334 #endif
2335 }
2336
2337 static int __init b44_init(void)
2338 {
2339         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2340         int err;
2341
2342         /* Set up parameters for syncing RX/TX DMA descriptors */
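        /* A descriptor is synced as (at least) one cache-line sized,
         * cache-line aligned chunk, so that a partial-line writeback on a
         * non-coherent CPU cannot clobber a neighbouring descriptor. */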
2343         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2344         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2345
2346         err = b44_pci_init();
2347         if (err)
2348                 return err;
2349         err = ssb_driver_register(&b44_ssb_driver);
2350         if (err)
2351                 b44_pci_exit();
2352         return err;
2353 }
2354
2355 static void __exit b44_cleanup(void)
2356 {
2357         ssb_driver_unregister(&b44_ssb_driver);
2358         b44_pci_exit();
2359 }
2360
2361 module_init(b44_init);
2362 module_exit(b44_cleanup);
2363