mac80211: Clear PS related flag on disabling power save.
[linux-2.6.git] / drivers / net / b44.c
1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7  * Copyright (C) 2006 Broadcom Corporation.
8  * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
9  *
10  * Distribute under GPL.
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/ssb/ssb.h>
30 #include <linux/slab.h>
31
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35
36
37 #include "b44.h"
38
39 #define DRV_MODULE_NAME         "b44"
40 #define DRV_MODULE_VERSION      "2.0"
41
42 #define B44_DEF_MSG_ENABLE        \
43         (NETIF_MSG_DRV          | \
44          NETIF_MSG_PROBE        | \
45          NETIF_MSG_LINK         | \
46          NETIF_MSG_TIMER        | \
47          NETIF_MSG_IFDOWN       | \
48          NETIF_MSG_IFUP         | \
49          NETIF_MSG_RX_ERR       | \
50          NETIF_MSG_TX_ERR)
51
52 /* length of time before we decide the hardware is borked,
53  * and dev->tx_timeout() should be called to fix the problem
54  */
55 #define B44_TX_TIMEOUT                  (5 * HZ)
56
57 /* hardware minimum and maximum for a single frame's data payload */
58 #define B44_MIN_MTU                     60
59 #define B44_MAX_MTU                     1500
60
61 #define B44_RX_RING_SIZE                512
62 #define B44_DEF_RX_RING_PENDING         200
63 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
64                                  B44_RX_RING_SIZE)
65 #define B44_TX_RING_SIZE                512
66 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
67 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
68                                  B44_TX_RING_SIZE)
69
70 #define TX_RING_GAP(BP) \
71         (B44_TX_RING_SIZE - (BP)->tx_pending)
72 #define TX_BUFFS_AVAIL(BP)                                              \
73         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
74           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
75           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
76 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
77
78 #define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
79 #define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)
80
81 /* minimum number of free TX descriptors required to wake up TX process */
82 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
83
84 /* b44 internal pattern match filter info */
85 #define B44_PATTERN_BASE        0x400
86 #define B44_PATTERN_SIZE        0x80
87 #define B44_PMASK_BASE          0x600
88 #define B44_PMASK_SIZE          0x10
89 #define B44_MAX_PATTERNS        16
90 #define B44_ETHIPV6UDP_HLEN     62
91 #define B44_ETHIPV4UDP_HLEN     42
92
93 static char version[] __devinitdata =
94         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
95
96 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
97 MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
98 MODULE_LICENSE("GPL");
99 MODULE_VERSION(DRV_MODULE_VERSION);
100
101 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
102 module_param(b44_debug, int, 0);
103 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
104
105
106 #ifdef CONFIG_B44_PCI
107 static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
108         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
109         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
110         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
111         { 0 } /* terminate list with empty entry */
112 };
113 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
114
115 static struct pci_driver b44_pci_driver = {
116         .name           = DRV_MODULE_NAME,
117         .id_table       = b44_pci_tbl,
118 };
119 #endif /* CONFIG_B44_PCI */
120
121 static const struct ssb_device_id b44_ssb_tbl[] = {
122         SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
123         SSB_DEVTABLE_END
124 };
125 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
126
127 static void b44_halt(struct b44 *);
128 static void b44_init_rings(struct b44 *);
129
130 #define B44_FULL_RESET          1
131 #define B44_FULL_RESET_SKIP_PHY 2
132 #define B44_PARTIAL_RESET       3
133 #define B44_CHIP_RESET_FULL     4
134 #define B44_CHIP_RESET_PARTIAL  5
135
136 static void b44_init_hw(struct b44 *, int);
137
138 static int dma_desc_sync_size;
139 static int instance;
140
141 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
142 #define _B44(x...)      # x,
143 B44_STAT_REG_DECLARE
144 #undef _B44
145 };
146
/* Sync one DMA descriptor (at @dma_base + @offset) so the device sees
 * the CPU's latest writes.  dma_desc_sync_size is computed at init time
 * (file-scope static).  Used only when the ring was allocated through
 * the B44_FLAG_*_RING_HACK fallback path.
 */
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}
155
/* Counterpart of b44_sync_dma_desc_for_device(): make the device's
 * writes to one DMA descriptor visible to the CPU before reading it.
 */
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}
164
/* Read a 32-bit core register via the SSB bus. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}
169
/* Write a 32-bit core register via the SSB bus.  Note: SSB writes may be
 * posted; read the register back (see b44_disable_ints()) when a flush
 * is required.
 */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
175
/* Poll register @reg until @bit is cleared (when @clear is nonzero) or
 * set (when @clear is zero), checking once per 10us for at most
 * @timeout iterations.
 *
 * Returns 0 on success, -ENODEV on timeout (with a rate-limited error
 * logged).
 */
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}
199
/* Read the 6-byte station address stored in CAM entry @index into
 * @data (network byte order: data[0] is the most significant octet).
 * Sequence: trigger the CAM read, wait for BUSY to clear, then unpack
 * DATA_LO (octets 2-5) and DATA_HI (octets 0-1).
 * Caller holds bp->lock; the b44_wait_bit() timeout is not propagated.
 */
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}
221
/* Store the 6-byte station address @data into CAM entry @index.
 * Inverse of __b44_cam_read(): load DATA_LO/DATA_HI (with the VALID
 * bit), trigger the CAM write, then wait for BUSY to clear.
 * The b44_wait_bit() timeout is not propagated to the caller.
 */
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
239
/* Mask all device interrupts.  Does NOT flush the posted write; use
 * b44_disable_ints() when the mask must take effect before returning.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
244
/* Mask all device interrupts and flush the posted write by reading
 * IMASK back, guaranteeing the hardware has seen the new mask.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}
252
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
257
/* Read MII register @reg of the PHY at @phy_addr over MDIO.
 *
 * Sequence: ack any pending MII interrupt status, start the MDIO read
 * frame, then wait for EMAC_INT_MII to signal completion.  The data is
 * read into *val unconditionally -- on timeout *val holds whatever the
 * data register contained, and the b44_wait_bit() error (-ENODEV) is
 * returned so callers can ignore it.
 */
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
273
/* Write @val to MII register @reg of the PHY at @phy_addr over MDIO.
 * Acks pending MII interrupt status, starts the MDIO write frame, and
 * waits for completion.  Returns 0 or -ENODEV on timeout.
 */
static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
285
286 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
287 {
288         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
289                 return 0;
290
291         return __b44_readphy(bp, bp->phy_addr, reg, val);
292 }
293
294 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
295 {
296         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
297                 return 0;
298
299         return __b44_writephy(bp, bp->phy_addr, reg, val);
300 }
301
302 /* miilib interface */
303 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
304 {
305         u32 val;
306         struct b44 *bp = netdev_priv(dev);
307         int rc = __b44_readphy(bp, phy_id, location, &val);
308         if (rc)
309                 return 0xffffffff;
310         return val;
311 }
312
/* miilib write callback: write @val to MII register @location of
 * @phy_id.  MDIO errors are ignored (the miilib interface is void).
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}
319
/* Reset the PHY: set BMCR_RESET, wait 100us, then verify the bit has
 * self-cleared (per the MII spec the PHY clears BMCR_RESET when the
 * reset completes).  Returns 0 on success, -ENODEV if the reset never
 * completed, or the MDIO access error.  No-op without a PHY.
 */
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}
341
/* Program the MAC's flow-control state from @pause_flags
 * (B44_FLAG_TX_PAUSE / B44_FLAG_RX_PAUSE), recording the new flags in
 * bp->flags.  RX pause is honored via RXCONFIG_FLOW; TX pause enables
 * MAC_FLOW_PAUSE_ENAB and loads 0xc0 into the RX-FIFO high-water field
 * (masked to MAC_FLOW_RX_HI_WATER).
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
364
/* Resolve flow control from our advertisement (@local) and the link
 * partner's ability (@remote) and apply it.  Only the asymmetric
 * "we receive pause" combination is ever enabled here; TX pause is
 * deliberately never auto-negotiated on (see comment below) and must
 * be requested explicitly through ethtool.
 */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)){
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
383
#ifdef CONFIG_BCM47XX
#include <asm/mach-bcm47xx/nvram.h>
/* Clear a spuriously-set MII "isolate" bit on PHY 0 for boards that
 * report nvram boardnum == 2 (Linksys WAP54G v1.0 hardware quirk).
 * Silently returns if the nvram variable is absent, the board does not
 * match, or the bit is already clear; logs a warning on MDIO failure.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
/* No-op on non-BCM47XX builds. */
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
419
/* Configure the PHY for operation.
 *
 * Steps: apply the WAP54G board workaround, mask the activity-LED
 * control bits, enable the transmit LED, then either start
 * autonegotiation with an advertisement built from bp->flags
 * (B44_FLAG_ADV_* / B44_FLAG_PAUSE_AUTO) or, when B44_FLAG_FORCE_LINK
 * is set, force speed/duplex via BMCR and disable flow control
 * entirely (no negotiation means the partner's pause ability is
 * unknowable).
 *
 * Returns 0 on success or the first failing MDIO access's error.
 * No-op (returns 0) without a PHY.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
483
484 static void b44_stats_update(struct b44 *bp)
485 {
486         unsigned long reg;
487         u32 *val;
488
489         val = &bp->hw_stats.tx_good_octets;
490         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
491                 *val++ += br32(bp, reg);
492         }
493
494         /* Pad */
495         reg += 8*4UL;
496
497         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
498                 *val++ += br32(bp, reg);
499         }
500 }
501
502 static void b44_link_report(struct b44 *bp)
503 {
504         if (!netif_carrier_ok(bp->dev)) {
505                 netdev_info(bp->dev, "Link is down\n");
506         } else {
507                 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
508                             (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
509                             (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
510
511                 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
512                             (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
513                             (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
514         }
515 }
516
/* Periodic link-state machine (called from b44_timer() under bp->lock).
 *
 * Without a PHY the link is forced to 100/full and carrier is raised
 * once.  Otherwise BMSR and the Broadcom AUXCTRL register are read to
 * track speed/duplex in bp->flags; on a down->up transition the MAC
 * duplex bit is programmed, flow control is (re)negotiated from
 * ADVERTISE/LPA unless the link is forced, and carrier is raised; on an
 * up->down transition carrier is dropped.  Remote-fault and jabber
 * conditions are only logged.  A bmsr of 0xffff (PHY not responding)
 * is ignored entirely.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}
577
/* 1 Hz maintenance timer: poll the PHY for link changes and fold the
 * hardware MIB counters into hw_stats, both under bp->lock, then
 * re-arm for the next (rounded) second.
 */
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
592
/* Reclaim completed TX descriptors (called from b44_poll() with
 * bp->lock held).
 *
 * The hardware's current descriptor offset (DMATX_STAT) marks how far
 * transmission has progressed; every buffer from tx_cons up to it is
 * unmapped and its skb freed.  The queue is rewoken once at least
 * B44_TX_WAKEUP_THRESH descriptors are free, and the guard timer
 * (GPTIMER) is cleared.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
622
623 /* Works like this.  This chip writes a 'struct rx_header" 30 bytes
624  * before the DMA address you give it.  So we allocate 30 more bytes
625  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626  * point the chip at 30 bytes past where the rx_header will go.
627  */
/* Allocate and DMA-map a fresh RX skb into ring slot @dest_idx_unmasked
 * (masked to the ring size); if @src_idx >= 0 that slot's skb pointer
 * is cleared after the move.  Returns RX_PKT_BUF_SZ on success or
 * -ENOMEM.
 *
 * The chip writes a struct rx_header ahead of the packet data (see the
 * comment above), so the buffer is RX_PKT_BUF_SZ = 1536 + RX_PKT_OFFSET
 * bytes and the header fields are zeroed before handing the slot to
 * hardware.  Because the chip cannot DMA above the first 1GB
 * (DMA_BIT_MASK(30)), a mapping that lands too high is undone and the
 * allocation retried once from GFP_DMA memory; if even that mapping is
 * unusable we give up.  Hitting the bounce path also sets
 * bp->force_copybreak so b44_rx() copies every packet from then on.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		/* Retry once from the GFP_DMA zone (low memory). */
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	/* The chip overwrites these; zero so stale values are never
	 * mistaken for a completed receive (b44_rx spins on rh->len). */
	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last descriptor: end-of-table */

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					    dest_idx * sizeof(*dp),
					    DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
702
/* Re-queue the already-mapped buffer at ring slot @src_idx into slot
 * @dest_idx_unmasked (masked) without allocating: move the skb/mapping
 * bookkeeping, zero the rx_header so the slot reads as "not yet
 * received", copy the descriptor (fixing the EOT bit for the new
 * position), and hand the buffer back to the device.  Used for dropped
 * packets and for the copybreak path in b44_rx().
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					 src_idx * sizeof(*src_desc),
					 DMA_BIDIRECTIONAL);

	/* Copy the descriptor, adjusting EOT for the destination slot
	 * (only the last ring entry carries it). */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	/* Give the data buffer itself back to the device. */
	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
748
/* NAPI receive: process up to @budget completed RX descriptors and
 * return the number of packets delivered to the stack.
 *
 * The hardware's DMARX_STAT offset is the producer index; we consume
 * from bp->rx_cons toward it.  For each slot:
 *   - length/error flags come from the chip-written rx_header;
 *     oversize or errored frames are dropped (buffer recycled);
 *   - a zero length may mean the header write raced the status
 *     update, so it is re-read a few times (2us apart) before the
 *     frame is dropped;
 *   - large frames (> RX_COPY_THRESHOLD, unless force_copybreak) are
 *     passed up directly and the slot refilled with a new skb; small
 *     frames are copied into a fresh skb and the original buffer
 *     recycled.
 * Finally the consumer pointer is written back to DMARX_PTR.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			/* The header may not be written yet; give the
			 * chip a few chances before giving up. */
			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			/* +2 keeps the IP header word-aligned. */
			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
836
/* NAPI poll handler.  bp->istat holds the interrupt causes latched by
 * the ISR.
 *
 * Under bp->lock: reclaim TX completions (ISTAT_TX/ISTAT_TO) and, on
 * RX-FIFO overflow (ISTAT_RFO), perform the fast-recovery reset
 * (re-enable the SSB core, rebuild rings, re-init hw).  RX processing
 * (b44_rx) then runs without the lock.  On any error cause
 * (ISTAT_ERRORS) the device is halted and fully re-initialized and
 * work_done is reset to 0, keeping NAPI scheduled for another pass.
 * When under budget, complete NAPI and unmask interrupts.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}
882
/* Hard interrupt handler.  Latches the pending, unmasked interrupt
 * causes into bp->istat, masks further chip interrupts, and defers the
 * real work to NAPI (b44_poll).
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		/* Ack only the causes we saw; the read flushes the
		 * posted write to the bus. */
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
924
/* netdev watchdog callback: TX failed to complete within B44_TX_TIMEOUT,
 * so fully reset the chip (including the PHY) and restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
943
/* Hard-start transmit.  Maps the skb for DMA, bouncing it through a
 * GFP_DMA buffer if the mapping lands above the chip's 1GB (30-bit)
 * DMA limit, fills in one TX descriptor, and kicks the DMA engine.
 * Called with the netdev xmit lock held; takes bp->lock internally.
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			/* Even the GFP_DMA buffer mapped too high; give up. */
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	/* Single-fragment frame: start-of-frame, end-of-frame, and
	 * interrupt-on-completion in one descriptor. */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					    entry * sizeof(bp->tx_ring[0]),
					    DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be visible before the doorbell write below. */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
1031
/* Change the device MTU.  If the interface is running, the chip must be
 * halted and fully reinitialized so RXMAXLEN/TXMAXLEN pick up the new
 * value (see b44_init_hw).  Valid range is B44_MIN_MTU..B44_MAX_MTU.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1058
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	/* RX side: each filled slot holds a full RX_PKT_BUF_SZ mapping. */
	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
1094
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* The *_RING_HACK rings are streaming mappings (see
	 * b44_alloc_consistent), so the cleared descriptors must be
	 * synced back to the device explicitly. */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	/* Pre-fill the RX ring; a partial fill is tolerated. */
	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
1123
1124 /*
1125  * Must not be invoked with interrupt sources disabled and
1126  * the hardware shutdown down.
1127  */
1128 static void b44_free_consistent(struct b44 *bp)
1129 {
1130         kfree(bp->rx_buffers);
1131         bp->rx_buffers = NULL;
1132         kfree(bp->tx_buffers);
1133         bp->tx_buffers = NULL;
1134         if (bp->rx_ring) {
1135                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1136                         dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1137                                          DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1138                         kfree(bp->rx_ring);
1139                 } else
1140                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1141                                           bp->rx_ring, bp->rx_ring_dma);
1142                 bp->rx_ring = NULL;
1143                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1144         }
1145         if (bp->tx_ring) {
1146                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1147                         dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1148                                          DMA_TABLE_BYTES, DMA_TO_DEVICE);
1149                         kfree(bp->tx_ring);
1150                 } else
1151                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1152                                           bp->tx_ring, bp->tx_ring_dma);
1153                 bp->tx_ring = NULL;
1154                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1155         }
1156 }
1157
1158 /*
1159  * Must not be invoked with interrupt sources disabled and
1160  * the hardware shutdown down.  Can sleep.
1161  */
1162 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1163 {
1164         int size;
1165
1166         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1167         bp->rx_buffers = kzalloc(size, gfp);
1168         if (!bp->rx_buffers)
1169                 goto out_err;
1170
1171         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1172         bp->tx_buffers = kzalloc(size, gfp);
1173         if (!bp->tx_buffers)
1174                 goto out_err;
1175
1176         size = DMA_TABLE_BYTES;
1177         bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1178                                          &bp->rx_ring_dma, gfp);
1179         if (!bp->rx_ring) {
1180                 /* Allocation may have failed due to pci_alloc_consistent
1181                    insisting on use of GFP_DMA, which is more restrictive
1182                    than necessary...  */
1183                 struct dma_desc *rx_ring;
1184                 dma_addr_t rx_ring_dma;
1185
1186                 rx_ring = kzalloc(size, gfp);
1187                 if (!rx_ring)
1188                         goto out_err;
1189
1190                 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1191                                              DMA_TABLE_BYTES,
1192                                              DMA_BIDIRECTIONAL);
1193
1194                 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1195                         rx_ring_dma + size > DMA_BIT_MASK(30)) {
1196                         kfree(rx_ring);
1197                         goto out_err;
1198                 }
1199
1200                 bp->rx_ring = rx_ring;
1201                 bp->rx_ring_dma = rx_ring_dma;
1202                 bp->flags |= B44_FLAG_RX_RING_HACK;
1203         }
1204
1205         bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1206                                          &bp->tx_ring_dma, gfp);
1207         if (!bp->tx_ring) {
1208                 /* Allocation may have failed due to ssb_dma_alloc_consistent
1209                    insisting on use of GFP_DMA, which is more restrictive
1210                    than necessary...  */
1211                 struct dma_desc *tx_ring;
1212                 dma_addr_t tx_ring_dma;
1213
1214                 tx_ring = kzalloc(size, gfp);
1215                 if (!tx_ring)
1216                         goto out_err;
1217
1218                 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1219                                              DMA_TABLE_BYTES,
1220                                              DMA_TO_DEVICE);
1221
1222                 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1223                         tx_ring_dma + size > DMA_BIT_MASK(30)) {
1224                         kfree(tx_ring);
1225                         goto out_err;
1226                 }
1227
1228                 bp->tx_ring = tx_ring;
1229                 bp->tx_ring_dma = tx_ring_dma;
1230                 bp->flags |= B44_FLAG_TX_RING_HACK;
1231         }
1232
1233         return 0;
1234
1235 out_err:
1236         b44_free_consistent(bp);
1237         return -ENOMEM;
1238 }
1239
/* bp->lock is held. */
/* Clear the hardware MIB counters.  The counters are configured as
 * clear-on-read, so reading every statistics register zeroes them. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
1251
/* bp->lock is held. */
/* Reset the MAC core.  If the core was already running, the DMA engines
 * are quiesced first.  For B44_CHIP_RESET_PARTIAL the function stops
 * after the core reset (PHY left untouched, typically before power
 * down); otherwise the MDIO interface and PHY selection are set up.
 */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		/* Drain and stop both DMA engines before touching
		 * anything else. */
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		/* Derive the MDIO clock divider from the backplane clock. */
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		/* External PHY: route MII to the external pins. */
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		/* Internal PHY: take it out of reset if necessary. */
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
1320
/* bp->lock is held. */
/* Stop the device: mask interrupts, reset and power down the PHY,
 * then perform a partial chip reset that leaves MAC and PHY disabled. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
1334
/* bp->lock is held. */
/* Program the device's unicast MAC address into CAM slot 0 and enable
 * the CAM, unless the interface is in promiscuous mode (in which case
 * the CAM stays disabled and all frames are accepted). */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1347
/* ndo_set_mac_address: validate and store the new MAC address, and
 * program it into the CAM when the chip has one.  Only allowed while
 * the interface is down (returns -EBUSY otherwise). */
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	/* Some chip revisions have no CAM; then RX runs promiscuous
	 * and there is nothing to program. */
	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1372
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		/* Full reset also reinitializes the PHY; skipped for
		 * B44_FULL_RESET_SKIP_PHY (error recovery paths). */
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		/* Partial reset (WOL path): only RX DMA is brought up. */
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
1418
/* ndo_open: allocate rings, bring up the hardware, hook the interrupt
 * and start the periodic link timer.  On IRQ failure everything is
 * unwound (chip left in partial reset, rings freed). */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	/* Periodic link/PHY poll, first fire in one second. */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
1455
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1468
1469 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1470 {
1471         u32 i;
1472         u32 *pattern = (u32 *) pp;
1473
1474         for (i = 0; i < bytes; i += sizeof(u32)) {
1475                 bw32(bp, B44_FILT_ADDR, table_offset + i);
1476                 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1477         }
1478 }
1479
1480 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1481 {
1482         int magicsync = 6;
1483         int k, j, len = offset;
1484         int ethaddr_bytes = ETH_ALEN;
1485
1486         memset(ppattern + offset, 0xff, magicsync);
1487         for (j = 0; j < magicsync; j++)
1488                 set_bit(len++, (unsigned long *) pmask);
1489
1490         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1491                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1492                         ethaddr_bytes = ETH_ALEN;
1493                 else
1494                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1495                 if (ethaddr_bytes <=0)
1496                         break;
1497                 for (k = 0; k< ethaddr_bytes; k++) {
1498                         ppattern[offset + magicsync +
1499                                 (j * ETH_ALEN) + k] = macaddr[k];
1500                         set_bit(len++, (unsigned long *) pmask);
1501                 }
1502         }
1503         return len - 1;
1504 }
1505
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}
1564
#ifdef CONFIG_B44_PCI
/* Arm wake-on-LAN at the PCI level: assert the power-enable bits in
 * the SSB backplane and the host PCI power-management registers. */
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
1579
/* Configure the chip for wake-on-LAN before suspend/close.  B0 and
 * later revisions have native magic-packet support; older ones get the
 * pseudo-magic pattern filters instead. */
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		/* Program the MAC address into the dedicated ADDR_LO/HI
		 * registers used by the hardware magic-packet matcher. */
		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}
1608
/* ndo_stop: quiesce NAPI and the link timer, halt the chip, release
 * the IRQ and ring memory.  If WOL is enabled, re-arm the chip in
 * partial-reset mode before the rings are torn down. */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}
1638
1639 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1640 {
1641         struct b44 *bp = netdev_priv(dev);
1642         struct net_device_stats *nstat = &dev->stats;
1643         struct b44_hw_stats *hwstat = &bp->hw_stats;
1644
1645         /* Convert HW stats into netdevice stats. */
1646         nstat->rx_packets = hwstat->rx_pkts;
1647         nstat->tx_packets = hwstat->tx_pkts;
1648         nstat->rx_bytes   = hwstat->rx_octets;
1649         nstat->tx_bytes   = hwstat->tx_octets;
1650         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1651                              hwstat->tx_oversize_pkts +
1652                              hwstat->tx_underruns +
1653                              hwstat->tx_excessive_cols +
1654                              hwstat->tx_late_cols);
1655         nstat->multicast  = hwstat->tx_multicast_pkts;
1656         nstat->collisions = hwstat->tx_total_cols;
1657
1658         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1659                                    hwstat->rx_undersize);
1660         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1661         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1662         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1663         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1664                                    hwstat->rx_oversize_pkts +
1665                                    hwstat->rx_missed_pkts +
1666                                    hwstat->rx_crc_align_errs +
1667                                    hwstat->rx_undersize +
1668                                    hwstat->rx_crc_errs +
1669                                    hwstat->rx_align_errs +
1670                                    hwstat->rx_symbol_errs);
1671
1672         nstat->tx_aborted_errors = hwstat->tx_underruns;
1673 #if 0
1674         /* Carrier lost counter seems to be broken for some devices */
1675         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1676 #endif
1677
1678         return nstat;
1679 }
1680
/* Load up to B44_MCAST_TABLE_SIZE multicast addresses into CAM slots
 * starting at slot 1 (slot 0 holds the unicast address).  Returns the
 * index of the first unused CAM slot.  Caller holds bp->lock. */
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i+1;
}
1695
/* Program the receive filtering mode; caller must hold bp->lock.
 *
 * Promiscuous mode is forced either on request (IFF_PROMISC) or when the
 * core has no CAM (RXCONFIG_CAM_ABSENT), since address filtering is then
 * impossible.  Otherwise the unicast MAC is (re)written, the multicast
 * list is loaded (or ALLMULTI is set when it exceeds the table), the
 * remaining CAM slots are zeroed and the CAM is re-enabled.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		/* Clear the unused remainder of the 64-entry CAM. */
		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1726
1727 static void b44_set_rx_mode(struct net_device *dev)
1728 {
1729         struct b44 *bp = netdev_priv(dev);
1730
1731         spin_lock_irq(&bp->lock);
1732         __b44_set_rx_mode(dev);
1733         spin_unlock_irq(&bp->lock);
1734 }
1735
1736 static u32 b44_get_msglevel(struct net_device *dev)
1737 {
1738         struct b44 *bp = netdev_priv(dev);
1739         return bp->msg_enable;
1740 }
1741
1742 static void b44_set_msglevel(struct net_device *dev, u32 value)
1743 {
1744         struct b44 *bp = netdev_priv(dev);
1745         bp->msg_enable = value;
1746 }
1747
1748 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1749 {
1750         struct b44 *bp = netdev_priv(dev);
1751         struct ssb_bus *bus = bp->sdev->bus;
1752
1753         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1754         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1755         switch (bus->bustype) {
1756         case SSB_BUSTYPE_PCI:
1757                 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1758                 break;
1759         case SSB_BUSTYPE_SSB:
1760                 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1761                 break;
1762         case SSB_BUSTYPE_PCMCIA:
1763         case SSB_BUSTYPE_SDIO:
1764                 WARN_ON(1); /* A device with this bus does not exist. */
1765                 break;
1766         }
1767 }
1768
/* ethtool .nway_reset: restart MII autonegotiation.
 * Returns 0 on success or -EINVAL when autonegotiation is disabled.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* BMCR is deliberately read twice; presumably the first read
	 * flushes a stale/latched value — TODO confirm with PHY docs. */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		/* Setting ANRESTART kicks off a fresh negotiation. */
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1788
1789 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1790 {
1791         struct b44 *bp = netdev_priv(dev);
1792
1793         cmd->supported = (SUPPORTED_Autoneg);
1794         cmd->supported |= (SUPPORTED_100baseT_Half |
1795                           SUPPORTED_100baseT_Full |
1796                           SUPPORTED_10baseT_Half |
1797                           SUPPORTED_10baseT_Full |
1798                           SUPPORTED_MII);
1799
1800         cmd->advertising = 0;
1801         if (bp->flags & B44_FLAG_ADV_10HALF)
1802                 cmd->advertising |= ADVERTISED_10baseT_Half;
1803         if (bp->flags & B44_FLAG_ADV_10FULL)
1804                 cmd->advertising |= ADVERTISED_10baseT_Full;
1805         if (bp->flags & B44_FLAG_ADV_100HALF)
1806                 cmd->advertising |= ADVERTISED_100baseT_Half;
1807         if (bp->flags & B44_FLAG_ADV_100FULL)
1808                 cmd->advertising |= ADVERTISED_100baseT_Full;
1809         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1810         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1811                 SPEED_100 : SPEED_10;
1812         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1813                 DUPLEX_FULL : DUPLEX_HALF;
1814         cmd->port = 0;
1815         cmd->phy_address = bp->phy_addr;
1816         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1817                 XCVR_INTERNAL : XCVR_EXTERNAL;
1818         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1819                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1820         if (cmd->autoneg == AUTONEG_ENABLE)
1821                 cmd->advertising |= ADVERTISED_Autoneg;
1822         if (!netif_running(dev)){
1823                 cmd->speed = 0;
1824                 cmd->duplex = 0xff;
1825         }
1826         cmd->maxtxpkt = 0;
1827         cmd->maxrxpkt = 0;
1828         return 0;
1829 }
1830
/* ethtool .set_settings: apply link configuration under bp->lock.
 *
 * With autoneg enabled, only 10/100 advertisement bits are honored
 * (gigabit is rejected up front); an empty advertising mask means
 * "advertise every mode we can".  With autoneg disabled, the forced
 * speed/duplex is recorded in the FORCE_LINK/100_BASE_T/FULL_DUPLEX
 * flags.  The PHY is only reprogrammed while the interface is up.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
			return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Drop any previously forced mode and advertisement. */
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			/* No explicit request: advertise all 10/100 modes. */
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		/* Forced mode: record speed/duplex in the flag bits. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1889
1890 static void b44_get_ringparam(struct net_device *dev,
1891                               struct ethtool_ringparam *ering)
1892 {
1893         struct b44 *bp = netdev_priv(dev);
1894
1895         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1896         ering->rx_pending = bp->rx_pending;
1897
1898         /* XXX ethtool lacks a tx_max_pending, oops... */
1899 }
1900
/* ethtool .set_ringparam: change the RX/TX pending limits.
 * Mini and jumbo rings are unsupported; values are capped below the
 * fixed descriptor ring sizes.  A full chip halt/re-init is required
 * for the new limits to take effect.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	/* Restart the chip so the new ring limits are picked up. */
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	/* Interrupts are re-enabled outside of the lock. */
	b44_enable_ints(bp);

	return 0;
}
1927
1928 static void b44_get_pauseparam(struct net_device *dev,
1929                                 struct ethtool_pauseparam *epause)
1930 {
1931         struct b44 *bp = netdev_priv(dev);
1932
1933         epause->autoneg =
1934                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1935         epause->rx_pause =
1936                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1937         epause->tx_pause =
1938                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1939 }
1940
/* ethtool .set_pauseparam: configure flow control.
 * When pause autonegotiation is requested, the chip is halted and fully
 * re-initialized so the new advertisement is negotiated; otherwise the
 * flow-control registers are rewritten in place.
 */
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		/* Renegotiation needs a full restart of the chip. */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1972
1973 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1974 {
1975         switch(stringset) {
1976         case ETH_SS_STATS:
1977                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1978                 break;
1979         }
1980 }
1981
1982 static int b44_get_sset_count(struct net_device *dev, int sset)
1983 {
1984         switch (sset) {
1985         case ETH_SS_STATS:
1986                 return ARRAY_SIZE(b44_gstrings);
1987         default:
1988                 return -EOPNOTSUPP;
1989         }
1990 }
1991
/* ethtool .get_ethtool_stats: copy the hardware MIB counters out.
 * Walks bp->hw_stats as a flat array of u32 counters starting at
 * tx_good_octets; this relies on the struct laying out one u32 per
 * b44_gstrings entry in the same order — TODO confirm against b44.h.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	/* Refresh the software copies from the hardware registers. */
	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
2008
2009 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2010 {
2011         struct b44 *bp = netdev_priv(dev);
2012
2013         wol->supported = WAKE_MAGIC;
2014         if (bp->flags & B44_FLAG_WOL_ENABLE)
2015                 wol->wolopts = WAKE_MAGIC;
2016         else
2017                 wol->wolopts = 0;
2018         memset(&wol->sopass, 0, sizeof(wol->sopass));
2019 }
2020
2021 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2022 {
2023         struct b44 *bp = netdev_priv(dev);
2024
2025         spin_lock_irq(&bp->lock);
2026         if (wol->wolopts & WAKE_MAGIC)
2027                 bp->flags |= B44_FLAG_WOL_ENABLE;
2028         else
2029                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2030         spin_unlock_irq(&bp->lock);
2031
2032         return 0;
2033 }
2034
/* ethtool entry points; .get_link uses the generic carrier-bit helper. */
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};
2053
2054 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2055 {
2056         struct mii_ioctl_data *data = if_mii(ifr);
2057         struct b44 *bp = netdev_priv(dev);
2058         int err = -EINVAL;
2059
2060         if (!netif_running(dev))
2061                 goto out;
2062
2063         spin_lock_irq(&bp->lock);
2064         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2065         spin_unlock_irq(&bp->lock);
2066 out:
2067         return err;
2068 }
2069
/* Read board-specific parameters (MAC address, PHY address, DMA window
 * offset) from the SSB SPROM and apply device-revision quirks.
 * Returns 0 on success or -EINVAL if the SPROM MAC address is invalid.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	/* On native SSB boards the second Ethernet core (instance > 1)
	 * uses the et1* SPROM variables; everything else uses et0*. */
	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	/* Record the factory MAC as the permanent address. */
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	/* Core revision 7 and later marks the B0-and-later chip steps. */
	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
2111
/* Net device entry points for the b44 MAC. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
2127
2128 static int __devinit b44_init_one(struct ssb_device *sdev,
2129                                   const struct ssb_device_id *ent)
2130 {
2131         static int b44_version_printed = 0;
2132         struct net_device *dev;
2133         struct b44 *bp;
2134         int err;
2135
2136         instance++;
2137
2138         if (b44_version_printed++ == 0)
2139                 pr_info("%s", version);
2140
2141
2142         dev = alloc_etherdev(sizeof(*bp));
2143         if (!dev) {
2144                 dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
2145                 err = -ENOMEM;
2146                 goto out;
2147         }
2148
2149         SET_NETDEV_DEV(dev, sdev->dev);
2150
2151         /* No interesting netdevice features in this card... */
2152         dev->features |= 0;
2153
2154         bp = netdev_priv(dev);
2155         bp->sdev = sdev;
2156         bp->dev = dev;
2157         bp->force_copybreak = 0;
2158
2159         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2160
2161         spin_lock_init(&bp->lock);
2162
2163         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2164         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2165
2166         dev->netdev_ops = &b44_netdev_ops;
2167         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2168         dev->watchdog_timeo = B44_TX_TIMEOUT;
2169         dev->irq = sdev->irq;
2170         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2171
2172         err = ssb_bus_powerup(sdev->bus, 0);
2173         if (err) {
2174                 dev_err(sdev->dev,
2175                         "Failed to powerup the bus\n");
2176                 goto err_out_free_dev;
2177         }
2178
2179         if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
2180             dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
2181                 dev_err(sdev->dev,
2182                         "Required 30BIT DMA mask unsupported by the system\n");
2183                 goto err_out_powerdown;
2184         }
2185
2186         err = b44_get_invariants(bp);
2187         if (err) {
2188                 dev_err(sdev->dev,
2189                         "Problem fetching invariants of chip, aborting\n");
2190                 goto err_out_powerdown;
2191         }
2192
2193         bp->mii_if.dev = dev;
2194         bp->mii_if.mdio_read = b44_mii_read;
2195         bp->mii_if.mdio_write = b44_mii_write;
2196         bp->mii_if.phy_id = bp->phy_addr;
2197         bp->mii_if.phy_id_mask = 0x1f;
2198         bp->mii_if.reg_num_mask = 0x1f;
2199
2200         /* By default, advertise all speed/duplex settings. */
2201         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2202                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2203
2204         /* By default, auto-negotiate PAUSE. */
2205         bp->flags |= B44_FLAG_PAUSE_AUTO;
2206
2207         err = register_netdev(dev);
2208         if (err) {
2209                 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2210                 goto err_out_powerdown;
2211         }
2212
2213         netif_carrier_off(dev);
2214
2215         ssb_set_drvdata(sdev, dev);
2216
2217         /* Chip reset provides power to the b44 MAC & PCI cores, which
2218          * is necessary for MAC register access.
2219          */
2220         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2221
2222         /* do a phy reset to test if there is an active phy */
2223         if (b44_phy_reset(bp) < 0)
2224                 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2225
2226         netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2227                     dev->dev_addr);
2228
2229         return 0;
2230
2231 err_out_powerdown:
2232         ssb_bus_may_powerdown(sdev->bus);
2233
2234 err_out_free_dev:
2235         free_netdev(dev);
2236
2237 out:
2238         return err;
2239 }
2240
/* SSB .remove: unregister the netdev, then disable the core and allow
 * the bus to power down before releasing the device. */
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
2252
/* SSB .suspend: quiesce the chip and drop into D3hot.
 * When Wake-on-LAN is enabled, the MAC is partially re-initialized and
 * armed for magic-packet wakeup before powering down.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Stop the periodic timer before halting the chip. */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		/* Partial reset keeps just enough of the MAC alive to
		 * recognize the wakeup pattern. */
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
2281
/* SSB .resume: repower the bus and, if the interface was up before
 * suspend, fully re-initialize the chip, reclaim the (shared) IRQ and
 * restart transmission.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		/* Undo the hardware init; the device stays detached. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	/* Kick the timer so link state is re-evaluated promptly. */
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
2327
/* SSB driver glue: binds b44 to matching SSB Ethernet cores. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2336
/* Register the SSB-over-PCI host glue when PCI support is built in. */
static inline int b44_pci_init(void)
{
#ifdef CONFIG_B44_PCI
	return ssb_pcihost_register(&b44_pci_driver);
#else
	return 0;
#endif
}
2345
/* Unregister the SSB-over-PCI host glue; no-op without PCI support. */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
2352
2353 static int __init b44_init(void)
2354 {
2355         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2356         int err;
2357
2358         /* Setup paramaters for syncing RX/TX DMA descriptors */
2359         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2360
2361         err = b44_pci_init();
2362         if (err)
2363                 return err;
2364         err = ssb_driver_register(&b44_ssb_driver);
2365         if (err)
2366                 b44_pci_exit();
2367         return err;
2368 }
2369
2370 static void __exit b44_cleanup(void)
2371 {
2372         ssb_driver_unregister(&b44_ssb_driver);
2373         b44_pci_exit();
2374 }
2375
/* Module entry and exit points. */
module_init(b44_init);
module_exit(b44_cleanup);
2378