[BNX2]: Restructure PHY event handling.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for printk messages */
#define DRV_MODULE_VERSION      "1.5.8"
#define DRV_MODULE_RELDATE      "April 24, 2007"

/* Convert a relative delay to an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Supported board variants; values index the board_info[] name table. */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
89
/* Adapter model name strings, indexed by board_t above. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
104
/* PCI IDs this driver claims.  HP OEM boards are matched by subsystem
 * vendor/device first; the PCI_ANY_ID entries after them catch the
 * generic Broadcom parts.  The trailing board_t selects the name in
 * board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
126
/* NVRAM device descriptor table.  Each entry carries five hardware
 * configuration words followed by a buffered-flash flag, page geometry
 * (bits/size), byte address mask, total size and a human-readable name.
 * NOTE(review): exact meaning of the five leading config words is
 * defined by struct flash_spec in bnx2.h (the inline comments mention
 * "strap, cfg1, & write1") — confirm against that header before
 * changing any entry.  "Expansion entry" rows are placeholders for
 * strapping values with no known part.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
213
/* Export the PCI ID table so userspace can autoload this module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes users of the shared window registers
 * (the address write and data read must not interleave with another
 * indirect access).  Returns the register value.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
245
/* Write a device register indirectly through the PCI config window.
 * Counterpart to bnx2_reg_rd_ind(); same locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258         offset += cid_addr;
259         spin_lock_bh(&bp->indirect_lock);
260         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261                 int i;
262
263                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266                 for (i = 0; i < 5; i++) {
267                         u32 val;
268                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270                                 break;
271                         udelay(5);
272                 }
273         } else {
274                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275                 REG_WR(bp, BNX2_CTX_DATA, val);
276         }
277         spin_unlock_bh(&bp->indirect_lock);
278 }
279
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 * If hardware auto-polling is enabled it is paused around the access
 * (the MDIO interface is shared with the auto-poll engine) and
 * restored afterwards.  Returns 0 on success, -EBUSY if the access
 * never completes (in which case *val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Pause hardware auto-polling of the PHY. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        /* Start the read: PHY address, register, and the BUSY bit. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to self-clear, then
         * re-read to pick up the data field. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we paused it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        return ret;
}
336
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-polling is paused around the access
 * and restored afterwards.  Returns 0 on success, -EBUSY if the
 * write never completes.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Pause hardware auto-polling of the PHY. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        /* Start the write: PHY address, register, data, BUSY bit. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to self-clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we paused it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        return ret;
}
385
/* Mask chip interrupts.  The read-back of INT_ACK_CMD flushes the
 * posted write so the mask is in effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask chip interrupts.  The index is acked first with MASK_INT
 * still set, then again with interrupts unmasked; finally COAL_NOW
 * forces a host-coalescing pass so any event that arrived while
 * masked raises an interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() will not re-enable
 * anything until the matching atomic_dec_and_test() succeeds.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the interface: synchronously disable interrupts, then stop
 * the poll routine and the TX queue.  trans_start is refreshed so the
 * netdev watchdog does not declare a TX timeout while the queue is
 * deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429         if (atomic_dec_and_test(&bp->intr_sem)) {
430                 if (netif_running(bp->dev)) {
431                         netif_wake_queue(bp->dev);
432                         netif_poll_enable(bp->dev);
433                         bnx2_enable_int(bp);
434                 }
435         }
436 }
437
/* Release the DMA-coherent descriptor rings, the combined status/
 * statistics block, the 5709 context pages and the software shadow
 * ring arrays.  Every pointer is checked and cleared, so this is safe
 * on a partially-allocated device — bnx2_alloc_mem() uses it as its
 * error-unwind path.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709 context pages (ctx_pages only set for that chip). */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one allocation; freeing
         * status_blk releases both. */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);         /* kfree(NULL) is a no-op */
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);         /* vfree(NULL) is a no-op */
        bp->rx_buf_ring = NULL;
}
476
/* Allocate the TX/RX rings, the combined status+statistics block and,
 * on 5709 chips, the context memory pages.  Returns 0 on success.
 * On any failure, everything allocated so far is released via
 * bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* The RX shadow array spans rx_max_ring rings and can be
         * large, so it comes from vmalloc rather than kmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* Statistics block starts at the cache-aligned offset after
         * the status block, in both the CPU and DMA address views. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB (0x2000) of context memory split into
                 * BCM_PAGE_SIZE pages; at least one page. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
547
/* Publish the current link state (up/down, speed/duplex, autoneg
 * result) to the firmware by writing the BNX2_LINK_STATUS word in
 * shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex as a single status code. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice: its link bits are latched
                         * per the MII spec, so the second read reflects
                         * the current state. */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
603
604 static void
605 bnx2_report_link(struct bnx2 *bp)
606 {
607         if (bp->link_up) {
608                 netif_carrier_on(bp->dev);
609                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611                 printk("%d Mbps ", bp->line_speed);
612
613                 if (bp->duplex == DUPLEX_FULL)
614                         printk("full duplex");
615                 else
616                         printk("half duplex");
617
618                 if (bp->flow_ctrl) {
619                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
620                                 printk(", receive ");
621                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
622                                         printk("& transmit ");
623                         }
624                         else {
625                                 printk(", transmit ");
626                         }
627                         printk("flow control ON");
628                 }
629                 printk("\n");
630         }
631         else {
632                 netif_carrier_off(bp->dev);
633                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634         }
635
636         bnx2_report_fw_link(bp);
637 }
638
639 static void
640 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
641 {
642         u32 local_adv, remote_adv;
643
644         bp->flow_ctrl = 0;
645         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
646                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
647
648                 if (bp->duplex == DUPLEX_FULL) {
649                         bp->flow_ctrl = bp->req_flow_ctrl;
650                 }
651                 return;
652         }
653
654         if (bp->duplex != DUPLEX_FULL) {
655                 return;
656         }
657
658         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
660                 u32 val;
661
662                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664                         bp->flow_ctrl |= FLOW_CTRL_TX;
665                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666                         bp->flow_ctrl |= FLOW_CTRL_RX;
667                 return;
668         }
669
670         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
672
673         if (bp->phy_flags & PHY_SERDES_FLAG) {
674                 u32 new_local_adv = 0;
675                 u32 new_remote_adv = 0;
676
677                 if (local_adv & ADVERTISE_1000XPAUSE)
678                         new_local_adv |= ADVERTISE_PAUSE_CAP;
679                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
681                 if (remote_adv & ADVERTISE_1000XPAUSE)
682                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
683                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
685
686                 local_adv = new_local_adv;
687                 remote_adv = new_remote_adv;
688         }
689
690         /* See Table 28B-3 of 802.3ab-1999 spec. */
691         if (local_adv & ADVERTISE_PAUSE_CAP) {
692                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
694                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
695                         }
696                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697                                 bp->flow_ctrl = FLOW_CTRL_RX;
698                         }
699                 }
700                 else {
701                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
702                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
703                         }
704                 }
705         }
706         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
709
710                         bp->flow_ctrl = FLOW_CTRL_TX;
711                 }
712         }
713 }
714
/* Record line_speed/duplex for a 5709 SerDes PHY whose link is up.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        /* GP_STATUS lives in its own PHY register block: select it,
         * read the autoneg result, then switch back to the default
         * COMBO_IEEEB0 block.  The order of these three accesses is
         * required. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        /* Forced speed: report the requested values, not the PHY's. */
        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
753
754 static int
755 bnx2_5708s_linkup(struct bnx2 *bp)
756 {
757         u32 val;
758
759         bp->link_up = 1;
760         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762                 case BCM5708S_1000X_STAT1_SPEED_10:
763                         bp->line_speed = SPEED_10;
764                         break;
765                 case BCM5708S_1000X_STAT1_SPEED_100:
766                         bp->line_speed = SPEED_100;
767                         break;
768                 case BCM5708S_1000X_STAT1_SPEED_1G:
769                         bp->line_speed = SPEED_1000;
770                         break;
771                 case BCM5708S_1000X_STAT1_SPEED_2G5:
772                         bp->line_speed = SPEED_2500;
773                         break;
774         }
775         if (val & BCM5708S_1000X_STAT1_FD)
776                 bp->duplex = DUPLEX_FULL;
777         else
778                 bp->duplex = DUPLEX_HALF;
779
780         return 0;
781 }
782
783 static int
784 bnx2_5706s_linkup(struct bnx2 *bp)
785 {
786         u32 bmcr, local_adv, remote_adv, common;
787
788         bp->link_up = 1;
789         bp->line_speed = SPEED_1000;
790
791         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
792         if (bmcr & BMCR_FULLDPLX) {
793                 bp->duplex = DUPLEX_FULL;
794         }
795         else {
796                 bp->duplex = DUPLEX_HALF;
797         }
798
799         if (!(bmcr & BMCR_ANENABLE)) {
800                 return 0;
801         }
802
803         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
805
806         common = local_adv & remote_adv;
807         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809                 if (common & ADVERTISE_1000XFULL) {
810                         bp->duplex = DUPLEX_FULL;
811                 }
812                 else {
813                         bp->duplex = DUPLEX_HALF;
814                 }
815         }
816
817         return 0;
818 }
819
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	/* Resolve speed/duplex of a copper link.  With autoneg enabled,
	 * intersect the local and remote advertisements (gigabit first,
	 * then 10/100); otherwise read the forced settings from BMCR.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T capability bits in STAT1000 sit
		 * two bit positions above the corresponding CTRL1000
		 * advertisement bits; shift them down to line up.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit mode: fall back to the 10/100
			 * advertisement registers, best mode first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: speed and duplex come straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
885
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the EMAC to match the speed, duplex and flow-control
	 * state that the PHY code resolved, then acknowledge the
	 * link-change interrupt.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex uses a larger slot-time value.
	 * NOTE(review): 0x2620/0x26ff are raw register encodings taken
	 * from the hardware spec — confirm against the 5706/5708 manual.
	 */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
952
953 static void
954 bnx2_enable_bmsr1(struct bnx2 *bp)
955 {
956         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957             (CHIP_NUM(bp) == CHIP_NUM_5709))
958                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959                                MII_BNX2_BLK_ADDR_GP_STATUS);
960 }
961
962 static void
963 bnx2_disable_bmsr1(struct bnx2 *bp)
964 {
965         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966             (CHIP_NUM(bp) == CHIP_NUM_5709))
967                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969 }
970
971 static int
972 bnx2_test_and_enable_2g5(struct bnx2 *bp)
973 {
974         u32 up1;
975         int ret = 1;
976
977         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978                 return 0;
979
980         if (bp->autoneg & AUTONEG_SPEED)
981                 bp->advertising |= ADVERTISED_2500baseX_Full;
982
983         if (CHIP_NUM(bp) == CHIP_NUM_5709)
984                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
986         bnx2_read_phy(bp, bp->mii_up1, &up1);
987         if (!(up1 & BCM5708S_UP1_2G5)) {
988                 up1 |= BCM5708S_UP1_2G5;
989                 bnx2_write_phy(bp, bp->mii_up1, up1);
990                 ret = 0;
991         }
992
993         if (CHIP_NUM(bp) == CHIP_NUM_5709)
994                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
997         return ret;
998 }
999
1000 static int
1001 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002 {
1003         u32 up1;
1004         int ret = 0;
1005
1006         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007                 return 0;
1008
1009         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
1012         bnx2_read_phy(bp, bp->mii_up1, &up1);
1013         if (up1 & BCM5708S_UP1_2G5) {
1014                 up1 &= ~BCM5708S_UP1_2G5;
1015                 bnx2_write_phy(bp, bp->mii_up1, up1);
1016                 ret = 1;
1017         }
1018
1019         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
1023         return ret;
1024 }
1025
1026 static void
1027 bnx2_enable_forced_2g5(struct bnx2 *bp)
1028 {
1029         u32 bmcr;
1030
1031         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032                 return;
1033
1034         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035                 u32 val;
1036
1037                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1039                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1049                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051         }
1052
1053         if (bp->autoneg & AUTONEG_SPEED) {
1054                 bmcr &= ~BMCR_ANENABLE;
1055                 if (bp->req_duplex == DUPLEX_FULL)
1056                         bmcr |= BMCR_FULLDPLX;
1057         }
1058         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059 }
1060
1061 static void
1062 bnx2_disable_forced_2g5(struct bnx2 *bp)
1063 {
1064         u32 bmcr;
1065
1066         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067                 return;
1068
1069         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070                 u32 val;
1071
1072                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1074                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1083                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085         }
1086
1087         if (bp->autoneg & AUTONEG_SPEED)
1088                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090 }
1091
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Re-evaluate PHY link state and reprogram the MAC to match.
	 * Reports a state change via bnx2_report_link().
	 * NOTE(review): callers appear to hold bp->phy_lock around the
	 * PHY accesses here — confirm at the call sites.
	 */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* Loopback modes always report link up. */
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* Read BMSR twice on purpose: the MII link-status bit latches,
	 * so the first read flushes a stale latched value and the second
	 * returns the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, override BMSR's link bit with the EMAC's own
	 * link indication.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so autoneg
		 * can restart cleanly, and clear parallel-detect state.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log/report when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1154
1155 static int
1156 bnx2_reset_phy(struct bnx2 *bp)
1157 {
1158         int i;
1159         u32 reg;
1160
1161         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1162
1163 #define PHY_RESET_MAX_WAIT 100
1164         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165                 udelay(10);
1166
1167                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1168                 if (!(reg & BMCR_RESET)) {
1169                         udelay(20);
1170                         break;
1171                 }
1172         }
1173         if (i == PHY_RESET_MAX_WAIT) {
1174                 return -EBUSY;
1175         }
1176         return 0;
1177 }
1178
1179 static u32
1180 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181 {
1182         u32 adv = 0;
1183
1184         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188                         adv = ADVERTISE_1000XPAUSE;
1189                 }
1190                 else {
1191                         adv = ADVERTISE_PAUSE_CAP;
1192                 }
1193         }
1194         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196                         adv = ADVERTISE_1000XPSE_ASYM;
1197                 }
1198                 else {
1199                         adv = ADVERTISE_PAUSE_ASYM;
1200                 }
1201         }
1202         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205                 }
1206                 else {
1207                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208                 }
1209         }
1210         return adv;
1211 }
1212
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Configure the SerDes PHY, either to a forced speed/duplex
	 * (when AUTONEG_SPEED is not set) or for autonegotiation.
	 * NOTE(review): bp->phy_lock appears to be held on entry (it is
	 * dropped and retaken around the msleep below) — confirm at the
	 * call sites.
	 */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the forced speed changes the 2.5G setting, we must
		 * bounce the link so the partner notices.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is a raw BMCR bit
				 * cleared here for 1G on the 5709; confirm
				 * its meaning against the chip manual.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link drop,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just refresh MAC settings. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: build the new advertisement word. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1324
/* Masks for translating between ethtool ADVERTISED_* settings and the
 * MII ADVERTISE_* advertisement register bits.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	/* Configure the copper PHY: program advertisements and restart
	 * autoneg, or force speed/duplex via BMCR.
	 * NOTE(review): bp->phy_lock appears to be held on entry (it is
	 * dropped and retaken around the msleep below) — confirm at the
	 * call sites.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we manage; preserve the
		 * rest of the current advertisement words.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build new advertisement words from ethtool settings. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only touch the PHY if something actually changed. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: compute the desired BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read: BMSR link status latches, so the second
		 * read reflects the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1434
1435 static int
1436 bnx2_setup_phy(struct bnx2 *bp)
1437 {
1438         if (bp->loopback == MAC_LOOPBACK)
1439                 return 0;
1440
1441         if (bp->phy_flags & PHY_SERDES_FLAG) {
1442                 return (bnx2_setup_serdes_phy(bp));
1443         }
1444         else {
1445                 return (bnx2_setup_copper_phy(bp));
1446         }
1447 }
1448
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Initialize the 5709 SerDes PHY.  Standard MII registers are
	 * offset by 0x10 on this part, and several settings live in
	 * separate register blocks selected via MII_BNX2_BLK_ADDR.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route MDIO accesses to the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode (disable auto-detect) in the SERDES_DIG block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise or withhold 2.5G capability in the OVER1G block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default register block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1497
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Initialize the 5708 SerDes PHY: reset it, enable fiber mode
	 * with auto-detect, optionally advertise 2.5G, and apply
	 * board-specific TX amplitude tweaks.
	 */
	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G capability when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1555
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	/* Initialize the 5706 SerDes PHY and adjust packet-length
	 * settings for jumbo vs. standard MTU.
	 */
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* NOTE(review): 0x300 is an undocumented GP_HW_CTL0 value from
	 * the vendor — confirm against the 5706 manual.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* Registers 0x18/0x1c below are vendor shadow registers; the
	 * write selects the shadow, the read-modify-write updates it.
	 */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1592
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* Initialize the copper PHY: reset, apply chip errata
	 * workarounds, set packet-length handling per MTU, and enable
	 * ethernet@wirespeed.  Registers 0x15/0x17/0x18/0x1c are vendor
	 * shadow/DSP access registers.
	 */
	bnx2_reset_phy(bp);

	/* Errata workaround: DSP coefficient writes for boards flagged
	 * with the CRC fix requirement.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Errata workaround: clear bit 8 of DSP expansion register 8
	 * to disable early DAC on flagged boards.
	 */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1643
1644
1645 static int
1646 bnx2_init_phy(struct bnx2 *bp)
1647 {
1648         u32 val;
1649         int rc = 0;
1650
1651         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
1654         bp->mii_bmcr = MII_BMCR;
1655         bp->mii_bmsr = MII_BMSR;
1656         bp->mii_bmsr1 = MII_BMSR;
1657         bp->mii_adv = MII_ADVERTISE;
1658         bp->mii_lpa = MII_LPA;
1659
1660         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
1662         bnx2_read_phy(bp, MII_PHYSID1, &val);
1663         bp->phy_id = val << 16;
1664         bnx2_read_phy(bp, MII_PHYSID2, &val);
1665         bp->phy_id |= val & 0xffff;
1666
1667         if (bp->phy_flags & PHY_SERDES_FLAG) {
1668                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669                         rc = bnx2_init_5706s_phy(bp);
1670                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671                         rc = bnx2_init_5708s_phy(bp);
1672                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673                         rc = bnx2_init_5709s_phy(bp);
1674         }
1675         else {
1676                 rc = bnx2_init_copper_phy(bp);
1677         }
1678
1679         bnx2_setup_phy(bp);
1680
1681         return rc;
1682 }
1683
1684 static int
1685 bnx2_set_mac_loopback(struct bnx2 *bp)
1686 {
1687         u32 mac_mode;
1688
1689         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693         bp->link_up = 1;
1694         return 0;
1695 }
1696
1697 static int bnx2_test_link(struct bnx2 *);
1698
1699 static int
1700 bnx2_set_phy_loopback(struct bnx2 *bp)
1701 {
1702         u32 mac_mode;
1703         int rc, i;
1704
1705         spin_lock_bh(&bp->phy_lock);
1706         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1707                             BMCR_SPEED1000);
1708         spin_unlock_bh(&bp->phy_lock);
1709         if (rc)
1710                 return rc;
1711
1712         for (i = 0; i < 10; i++) {
1713                 if (bnx2_test_link(bp) == 0)
1714                         break;
1715                 msleep(100);
1716         }
1717
1718         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1721                       BNX2_EMAC_MODE_25G_MODE);
1722
1723         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725         bp->link_up = 1;
1726         return 0;
1727 }
1728
/* Send a message to the bootcode through the shared-memory mailbox and
 * wait for its acknowledgement.  Returns 0 on success, -EBUSY on ack
 * timeout, -EIO if the firmware acked with an error status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh driver sequence number so the
	 * firmware ack (which echoes the sequence) can be matched below.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget: report success without
	 * checking the ack or the firmware status.
	 */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	/* Firmware acked but reported a non-OK status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1771
/* Program the 5709 host page table with the DMA addresses of the
 * context memory blocks.  Returns 0 on success, -EBUSY if the chip
 * fails to complete a page-table write.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable context memory and start its init; bits 16+ encode the
	 * host page size relative to 256 bytes.  NOTE(review): the
	 * meaning of bit 12 is not visible here -- confirm against the
	 * 5709 programming documentation.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the low/high halves of the page DMA address,
		 * then issue the write request for entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll up to 10 times, 5 us apart, for the chip to clear
		 * the WRITE_REQ bit.
		 */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1805
/* Zero the on-chip context memory for all 96 connection IDs
 * (non-5709 chips).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 maps some virtual CIDs to different
			 * physical CIDs (those with bit 3 set are
			 * relocated).  NOTE(review): remap formula taken
			 * as-is -- confirm against the 5706 A0 errata.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context page at virtual address 0 ... */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* ... then restore the mapping at the CID's own address. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1846
1847 static int
1848 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849 {
1850         u16 *good_mbuf;
1851         u32 good_mbuf_cnt;
1852         u32 val;
1853
1854         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855         if (good_mbuf == NULL) {
1856                 printk(KERN_ERR PFX "Failed to allocate memory in "
1857                                     "bnx2_alloc_bad_rbuf\n");
1858                 return -ENOMEM;
1859         }
1860
1861         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864         good_mbuf_cnt = 0;
1865
1866         /* Allocate a bunch of mbufs and save the good ones in an array. */
1867         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875                 /* The addresses with Bit 9 set are bad memory blocks. */
1876                 if (!(val & (1 << 9))) {
1877                         good_mbuf[good_mbuf_cnt] = (u16) val;
1878                         good_mbuf_cnt++;
1879                 }
1880
1881                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882         }
1883
1884         /* Free the good ones back to the mbuf pool thus discarding
1885          * all the bad ones. */
1886         while (good_mbuf_cnt) {
1887                 good_mbuf_cnt--;
1888
1889                 val = good_mbuf[good_mbuf_cnt];
1890                 val = (val << 9) | val | 1;
1891
1892                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893         }
1894         kfree(good_mbuf);
1895         return 0;
1896 }
1897
1898 static void
1899 bnx2_set_mac_addr(struct bnx2 *bp)
1900 {
1901         u32 val;
1902         u8 *mac_addr = bp->dev->dev_addr;
1903
1904         val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
1908         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1909                 (mac_addr[4] << 8) | mac_addr[5];
1910
1911         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912 }
1913
1914 static inline int
1915 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916 {
1917         struct sk_buff *skb;
1918         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919         dma_addr_t mapping;
1920         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1921         unsigned long align;
1922
1923         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1924         if (skb == NULL) {
1925                 return -ENOMEM;
1926         }
1927
1928         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929                 skb_reserve(skb, BNX2_RX_ALIGN - align);
1930
1931         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932                 PCI_DMA_FROMDEVICE);
1933
1934         rx_buf->skb = skb;
1935         pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940         bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942         return 0;
1943 }
1944
1945 static int
1946 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1947 {
1948         struct status_block *sblk = bp->status_blk;
1949         u32 new_link_state, old_link_state;
1950         int is_set = 1;
1951
1952         new_link_state = sblk->status_attn_bits & event;
1953         old_link_state = sblk->status_attn_bits_ack & event;
1954         if (new_link_state != old_link_state) {
1955                 if (new_link_state)
1956                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1957                 else
1958                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1959         } else
1960                 is_set = 0;
1961
1962         return is_set;
1963 }
1964
1965 static void
1966 bnx2_phy_int(struct bnx2 *bp)
1967 {
1968         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1969                 spin_lock(&bp->phy_lock);
1970                 bnx2_set_link(bp);
1971                 spin_unlock(&bp->phy_lock);
1972         }
1973 }
1974
/* Reclaim TX buffer descriptors completed by the hardware: unmap the
 * DMA buffers, free the skbs, advance the software consumer index, and
 * wake the TX queue if enough descriptors became free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* NOTE(review): the index is bumped past page-boundary values
	 * (x & MAX_TX_DESC_CNT == MAX_TX_DESC_CNT) -- presumably those
	 * slots hold link BDs, not packet BDs; confirm.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's final BD has not completed
			 * yet (signed 16-bit ring distance comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear header portion ... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* ... and each fragment page. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Refresh the hardware consumer index to pick up BDs
		 * completed while we were cleaning.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough BDs are free now;
	 * recheck under netif_tx_lock to avoid racing with the xmit path.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2062
2063 static inline void
2064 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2065         u16 cons, u16 prod)
2066 {
2067         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2068         struct rx_bd *cons_bd, *prod_bd;
2069
2070         cons_rx_buf = &bp->rx_buf_ring[cons];
2071         prod_rx_buf = &bp->rx_buf_ring[prod];
2072
2073         pci_dma_sync_single_for_device(bp->pdev,
2074                 pci_unmap_addr(cons_rx_buf, mapping),
2075                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2076
2077         bp->rx_prod_bseq += bp->rx_buf_use_size;
2078
2079         prod_rx_buf->skb = skb;
2080
2081         if (cons == prod)
2082                 return;
2083
2084         pci_unmap_addr_set(prod_rx_buf, mapping,
2085                         pci_unmap_addr(cons_rx_buf, mapping));
2086
2087         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2088         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2089         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2090         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2091 }
2092
/* Process up to @budget received packets: validate each frame header,
 * replenish or recycle the ring buffer, and pass good packets up the
 * stack.  Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* NOTE(review): indices landing on the last entry of a ring page
	 * are skipped -- presumably those slots hold link BDs; confirm.
	 */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame-header area for the CPU; the full
		 * buffer is unmapped later if we keep this skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr with status and length at
		 * the head of the buffer.  NOTE(review): the 4 bytes
		 * subtracted are presumably the trailing FCS -- confirm.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		/* Recycle the buffer on any receive error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Hand the original buffer back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was posted; pass this
			 * one up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Out of memory (or bad frame): recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN
		 * ethertype (0x8100).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only for frames recognized
		 * as TCP/UDP with no checksum error flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2240
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Ack and mask the interrupt; bnx2_poll() re-enables it once all
	 * pending work has been processed.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to NAPI polling. */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2263
/* INTx ISR: detect whether the interrupt is ours (the line may be
 * shared), ack/mask it, and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Ack and mask the interrupt; bnx2_poll() re-enables it when
	 * all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2293
2294 #define STATUS_ATTN_EVENTS      STATUS_ATTN_BITS_LINK_STATE
2295
2296 static inline int
2297 bnx2_has_work(struct bnx2 *bp)
2298 {
2299         struct status_block *sblk = bp->status_blk;
2300
2301         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2302             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2303                 return 1;
2304
2305         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2306             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2307                 return 1;
2308
2309         return 0;
2310 }
2311
/* NAPI poll handler: service PHY attention events, TX completions, and
 * up to *budget RX packets.  Returns 1 to stay on the poll list, 0
 * after completing all work and re-enabling interrupts.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when a bit differs from its ack. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Honor the smaller of the global budget and this
		 * device's quota.
		 */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	/* All work done: leave polling mode and re-enable interrupts. */
	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx path: the register is written twice, first with
		 * MASK_INT set, then without.  NOTE(review): the double
		 * write appears deliberate (likely an INTx ordering
		 * workaround) -- confirm before simplifying.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* More work pending: remain on the poll list. */
	return 1;
}
2372
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; re-add them below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no VLAN group is registered and ASF
	 * is disabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 filter bits using
		 * the low byte of its little-endian CRC.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 rules. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2447
2448 #define FW_BUF_SIZE     0x8000
2449
2450 static int
2451 bnx2_gunzip_init(struct bnx2 *bp)
2452 {
2453         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2454                 goto gunzip_nomem1;
2455
2456         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2457                 goto gunzip_nomem2;
2458
2459         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2460         if (bp->strm->workspace == NULL)
2461                 goto gunzip_nomem3;
2462
2463         return 0;
2464
2465 gunzip_nomem3:
2466         kfree(bp->strm);
2467         bp->strm = NULL;
2468
2469 gunzip_nomem2:
2470         vfree(bp->gunzip_buf);
2471         bp->gunzip_buf = NULL;
2472
2473 gunzip_nomem1:
2474         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2475                             "uncompression.\n", bp->dev->name);
2476         return -ENOMEM;
2477 }
2478
2479 static void
2480 bnx2_gunzip_end(struct bnx2 *bp)
2481 {
2482         kfree(bp->strm->workspace);
2483
2484         kfree(bp->strm);
2485         bp->strm = NULL;
2486
2487         if (bp->gunzip_buf) {
2488                 vfree(bp->gunzip_buf);
2489                 bp->gunzip_buf = NULL;
2490         }
2491 }
2492
/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf.
 * On success returns 0 and sets *outbuf/*outlen to the decompressed
 * data; otherwise returns -EINVAL for a bad header or a zlib error
 * code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* The fixed portion of a gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the NUL-terminated original file name, if present. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* Inflate the raw deflate stream into the preallocated buffer. */
	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits => raw deflate (header already skipped). */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2533
2534 static void
2535 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2536         u32 rv2p_proc)
2537 {
2538         int i;
2539         u32 val;
2540
2541
2542         for (i = 0; i < rv2p_code_len; i += 8) {
2543                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2544                 rv2p_code++;
2545                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2546                 rv2p_code++;
2547
2548                 if (rv2p_proc == RV2P_PROC1) {
2549                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2550                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2551                 }
2552                 else {
2553                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2554                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2555                 }
2556         }
2557
2558         /* Reset the processor, un-stall is done later. */
2559         if (rv2p_proc == RV2P_PROC1) {
2560                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2561         }
2562         else {
2563                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2564         }
2565 }
2566
2567 static int
2568 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2569 {
2570         u32 offset;
2571         u32 val;
2572         int rc;
2573
2574         /* Halt the CPU. */
2575         val = REG_RD_IND(bp, cpu_reg->mode);
2576         val |= cpu_reg->mode_value_halt;
2577         REG_WR_IND(bp, cpu_reg->mode, val);
2578         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2579
2580         /* Load the Text area. */
2581         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2582         if (fw->gz_text) {
2583                 u32 text_len;
2584                 void *text;
2585
2586                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2587                                  &text_len);
2588                 if (rc)
2589                         return rc;
2590
2591                 fw->text = text;
2592         }
2593         if (fw->gz_text) {
2594                 int j;
2595
2596                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2597                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2598                 }
2599         }
2600
2601         /* Load the Data area. */
2602         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2603         if (fw->data) {
2604                 int j;
2605
2606                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2607                         REG_WR_IND(bp, offset, fw->data[j]);
2608                 }
2609         }
2610
2611         /* Load the SBSS area. */
2612         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2613         if (fw->sbss) {
2614                 int j;
2615
2616                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2617                         REG_WR_IND(bp, offset, fw->sbss[j]);
2618                 }
2619         }
2620
2621         /* Load the BSS area. */
2622         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2623         if (fw->bss) {
2624                 int j;
2625
2626                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2627                         REG_WR_IND(bp, offset, fw->bss[j]);
2628                 }
2629         }
2630
2631         /* Load the Read-Only area. */
2632         offset = cpu_reg->spad_base +
2633                 (fw->rodata_addr - cpu_reg->mips_view_base);
2634         if (fw->rodata) {
2635                 int j;
2636
2637                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2638                         REG_WR_IND(bp, offset, fw->rodata[j]);
2639                 }
2640         }
2641
2642         /* Clear the pre-fetch instruction. */
2643         REG_WR_IND(bp, cpu_reg->inst, 0);
2644         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2645
2646         /* Start the CPU. */
2647         val = REG_RD_IND(bp, cpu_reg->mode);
2648         val &= ~cpu_reg->mode_value_halt;
2649         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2650         REG_WR_IND(bp, cpu_reg->mode, val);
2651
2652         return 0;
2653 }
2654
/* Download firmware into all on-chip processors: the two RV2P engines,
 * then the RX, TX, TX patch-up (TPAT), completion (COM) and — on 5709
 * only — command (CP) processors.  For each CPU a local cpu_reg
 * descriptor is filled with that processor's register offsets and passed
 * to load_cpu_fw() together with the chip-appropriate firmware image.
 *
 * Returns 0 on success or a negative error from gunzip setup,
 * decompression, or a CPU load; the gunzip state is torn down on every
 * path via the init_cpu_err label.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Allocate the zlib stream and output buffer used below. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	/* Base subtracted from firmware link addresses by load_cpu_fw()
	 * to obtain scratchpad offsets; same value for every CPU.
	 */
	cpu_reg.mips_view_base = 0x8000000;

	/* 5709 uses a different firmware set than 5706/5708. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor.  Only the 5709 has CP firmware
	 * to load here; earlier chips skip this step entirely.
	 */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	/* Free the gunzip state whether we succeeded or failed. */
	bnx2_gunzip_end(bp);
	return rc;
}
2799
/* Transition the device between PCI power states.
 *
 * PCI_D0: clear any pending PME status, wait out the mandatory
 * D3hot->D0 transition delay, and take the MAC out of magic-packet /
 * ACPI wake-up mode.
 *
 * PCI_D3hot: when Wake-on-LAN is enabled, force the copper PHY to
 * 10/100 autoneg, configure the MAC for magic-packet and all-multicast
 * reception, notify the bootcode of the suspend type, then program the
 * new power state (with PME enabled if WoL is active) into PMCSR.
 *
 * Returns 0 on success or -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set state to D0 and clear PME status (write-1-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any received wake-up packets and disable
		 * magic-packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WoL link,
			 * then restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* NOTE(review): the literal 1 appears to select a
			 * sort-user rule entry alongside the BC/MC enables —
			 * confirm against the RPM register documentation.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode which suspend mode we are entering. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 is the D3hot encoding in the PMCSR power-state field.
		 * NOTE(review): 5706 A0/A1 are left in D0 unless WoL is on —
		 * presumably a chip workaround; confirm against the errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2926
2927 static int
2928 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2929 {
2930         u32 val;
2931         int j;
2932
2933         /* Request access to the flash interface. */
2934         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2935         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2936                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2937                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2938                         break;
2939
2940                 udelay(5);
2941         }
2942
2943         if (j >= NVRAM_TIMEOUT_COUNT)
2944                 return -EBUSY;
2945
2946         return 0;
2947 }
2948
2949 static int
2950 bnx2_release_nvram_lock(struct bnx2 *bp)
2951 {
2952         int j;
2953         u32 val;
2954
2955         /* Relinquish nvram interface. */
2956         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2957
2958         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2959                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2960                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2961                         break;
2962
2963                 udelay(5);
2964         }
2965
2966         if (j >= NVRAM_TIMEOUT_COUNT)
2967                 return -EBUSY;
2968
2969         return 0;
2970 }
2971
2972
2973 static int
2974 bnx2_enable_nvram_write(struct bnx2 *bp)
2975 {
2976         u32 val;
2977
2978         val = REG_RD(bp, BNX2_MISC_CFG);
2979         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2980
2981         if (!bp->flash_info->buffered) {
2982                 int j;
2983
2984                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2985                 REG_WR(bp, BNX2_NVM_COMMAND,
2986                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2987
2988                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2989                         udelay(5);
2990
2991                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2992                         if (val & BNX2_NVM_COMMAND_DONE)
2993                                 break;
2994                 }
2995
2996                 if (j >= NVRAM_TIMEOUT_COUNT)
2997                         return -EBUSY;
2998         }
2999         return 0;
3000 }
3001
3002 static void
3003 bnx2_disable_nvram_write(struct bnx2 *bp)
3004 {
3005         u32 val;
3006
3007         val = REG_RD(bp, BNX2_MISC_CFG);
3008         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3009 }
3010
3011
3012 static void
3013 bnx2_enable_nvram_access(struct bnx2 *bp)
3014 {
3015         u32 val;
3016
3017         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3018         /* Enable both bits, even on read. */
3019         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3020                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3021 }
3022
3023 static void
3024 bnx2_disable_nvram_access(struct bnx2 *bp)
3025 {
3026         u32 val;
3027
3028         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3029         /* Disable both bits, even after read. */
3030         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3031                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3032                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3033 }
3034
3035 static int
3036 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3037 {
3038         u32 cmd;
3039         int j;
3040
3041         if (bp->flash_info->buffered)
3042                 /* Buffered flash, no erase needed */
3043                 return 0;
3044
3045         /* Build an erase command */
3046         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3047               BNX2_NVM_COMMAND_DOIT;
3048
3049         /* Need to clear DONE bit separately. */
3050         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3051
3052         /* Address of the NVRAM to read from. */
3053         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3054
3055         /* Issue an erase command. */
3056         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3057
3058         /* Wait for completion. */
3059         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3060                 u32 val;
3061
3062                 udelay(5);
3063
3064                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3065                 if (val & BNX2_NVM_COMMAND_DONE)
3066                         break;
3067         }
3068
3069         if (j >= NVRAM_TIMEOUT_COUNT)
3070                 return -EBUSY;
3071
3072         return 0;
3073 }
3074
/* Read one 32-bit word of NVRAM at byte offset 'offset' into ret_val
 * (4 bytes, CPU byte order).  cmd_flags carries FIRST/LAST framing bits
 * for multi-word transfers.  Returns 0 on success or -EBUSY if the
 * controller never signals completion.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page index << page_bits) + offset within page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Data register is big-endian; convert to CPU order
			 * before handing the bytes back to the caller.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3120
3121
/* Write one 32-bit word (4 bytes at 'val', CPU byte order) to NVRAM at
 * byte offset 'offset'.  cmd_flags carries FIRST/LAST framing bits for
 * multi-word transfers.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts are
	 * addressed as (page index << page_bits) + offset within page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Data register expects big-endian; convert from CPU order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3165
3166 static int
3167 bnx2_init_nvram(struct bnx2 *bp)
3168 {
3169         u32 val;
3170         int j, entry_count, rc;
3171         struct flash_spec *flash;
3172
3173         /* Determine the selected interface. */
3174         val = REG_RD(bp, BNX2_NVM_CFG1);
3175
3176         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3177
3178         rc = 0;
3179         if (val & 0x40000000) {
3180
3181                 /* Flash interface has been reconfigured */
3182                 for (j = 0, flash = &flash_table[0]; j < entry_count;
3183                      j++, flash++) {
3184                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
3185                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3186                                 bp->flash_info = flash;
3187                                 break;
3188                         }
3189                 }
3190         }
3191         else {
3192                 u32 mask;
3193                 /* Not yet been reconfigured */
3194
3195                 if (val & (1 << 23))
3196                         mask = FLASH_BACKUP_STRAP_MASK;
3197                 else
3198                         mask = FLASH_STRAP_MASK;
3199
3200                 for (j = 0, flash = &flash_table[0]; j < entry_count;
3201                         j++, flash++) {
3202
3203                         if ((val & mask) == (flash->strapping & mask)) {
3204                                 bp->flash_info = flash;
3205
3206                                 /* Request access to the flash interface. */
3207                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3208                                         return rc;
3209
3210                                 /* Enable access to flash interface */
3211                                 bnx2_enable_nvram_access(bp);
3212
3213                                 /* Reconfigure the flash interface */
3214                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3215                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3216                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3217                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3218
3219                                 /* Disable access to flash interface */
3220                                 bnx2_disable_nvram_access(bp);
3221                                 bnx2_release_nvram_lock(bp);
3222
3223                                 break;
3224                         }
3225                 }
3226         } /* if (val & 0x40000000) */
3227
3228         if (j == entry_count) {
3229                 bp->flash_info = NULL;
3230                 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3231                 return -ENODEV;
3232         }
3233
3234         val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3235         val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3236         if (val)
3237                 bp->flash_size = val;
3238         else
3239                 bp->flash_size = bp->flash_info->total_size;
3240
3241         return rc;
3242 }
3243
3244 static int
3245 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3246                 int buf_size)
3247 {
3248         int rc = 0;
3249         u32 cmd_flags, offset32, len32, extra;
3250
3251         if (buf_size == 0)
3252                 return 0;
3253
3254         /* Request access to the flash interface. */
3255         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3256                 return rc;
3257
3258         /* Enable access to flash interface */
3259         bnx2_enable_nvram_access(bp);
3260
3261         len32 = buf_size;
3262         offset32 = offset;
3263         extra = 0;
3264
3265         cmd_flags = 0;
3266
3267         if (offset32 & 3) {
3268                 u8 buf[4];
3269                 u32 pre_len;
3270
3271                 offset32 &= ~3;
3272                 pre_len = 4 - (offset & 3);
3273
3274                 if (pre_len >= len32) {
3275                         pre_len = len32;
3276                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3277                                     BNX2_NVM_COMMAND_LAST;
3278                 }
3279                 else {
3280                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3281                 }
3282
3283                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3284
3285                 if (rc)
3286                         return rc;
3287
3288                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3289
3290                 offset32 += 4;
3291                 ret_buf += pre_len;
3292                 len32 -= pre_len;
3293         }
3294         if (len32 & 3) {
3295                 extra = 4 - (len32 & 3);
3296                 len32 = (len32 + 4) & ~3;
3297         }
3298
3299         if (len32 == 4) {
3300                 u8 buf[4];
3301
3302                 if (cmd_flags)
3303                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3304                 else
3305                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3306                                     BNX2_NVM_COMMAND_LAST;
3307
3308                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3309
3310                 memcpy(ret_buf, buf, 4 - extra);
3311         }
3312         else if (len32 > 0) {
3313                 u8 buf[4];
3314
3315                 /* Read the first word. */
3316                 if (cmd_flags)
3317                         cmd_flags = 0;
3318                 else
3319                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3320
3321                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3322
3323                 /* Advance to the next dword. */
3324                 offset32 += 4;
3325                 ret_buf += 4;
3326                 len32 -= 4;
3327
3328                 while (len32 > 4 && rc == 0) {
3329                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3330
3331                         /* Advance to the next dword. */
3332                         offset32 += 4;
3333                         ret_buf += 4;
3334                         len32 -= 4;
3335                 }
3336
3337                 if (rc)
3338                         return rc;
3339
3340                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3341                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3342
3343                 memcpy(ret_buf, buf, 4 - extra);
3344         }
3345
3346         /* Disable access to flash interface */
3347         bnx2_disable_nvram_access(bp);
3348
3349         bnx2_release_nvram_lock(bp);
3350
3351         return rc;
3352 }
3353
3354 static int
3355 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3356                 int buf_size)
3357 {
3358         u32 written, offset32, len32;
3359         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3360         int rc = 0;
3361         int align_start, align_end;
3362
3363         buf = data_buf;
3364         offset32 = offset;
3365         len32 = buf_size;
3366         align_start = align_end = 0;
3367
3368         if ((align_start = (offset32 & 3))) {
3369                 offset32 &= ~3;
3370                 len32 += align_start;
3371                 if (len32 < 4)
3372                         len32 = 4;
3373                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3374                         return rc;
3375         }
3376
3377         if (len32 & 3) {
3378                 align_end = 4 - (len32 & 3);
3379                 len32 += align_end;
3380                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3381                         return rc;
3382         }
3383
3384         if (align_start || align_end) {
3385                 align_buf = kmalloc(len32, GFP_KERNEL);
3386                 if (align_buf == NULL)
3387                         return -ENOMEM;
3388                 if (align_start) {
3389                         memcpy(align_buf, start, 4);
3390                 }
3391                 if (align_end) {
3392                         memcpy(align_buf + len32 - 4, end, 4);
3393                 }
3394                 memcpy(align_buf + align_start, data_buf, buf_size);
3395                 buf = align_buf;
3396         }
3397
3398         if (bp->flash_info->buffered == 0) {
3399                 flash_buffer = kmalloc(264, GFP_KERNEL);
3400                 if (flash_buffer == NULL) {
3401                         rc = -ENOMEM;
3402                         goto nvram_write_end;
3403                 }
3404         }
3405
3406         written = 0;
3407         while ((written < len32) && (rc == 0)) {
3408                 u32 page_start, page_end, data_start, data_end;
3409                 u32 addr, cmd_flags;
3410                 int i;
3411
3412                 /* Find the page_start addr */
3413                 page_start = offset32 + written;
3414                 page_start -= (page_start % bp->flash_info->page_size);
3415                 /* Find the page_end addr */
3416                 page_end = page_start + bp->flash_info->page_size;
3417                 /* Find the data_start addr */
3418                 data_start = (written == 0) ? offset32 : page_start;
3419                 /* Find the data_end addr */
3420                 data_end = (page_end > offset32 + len32) ?
3421                         (offset32 + len32) : page_end;
3422
3423                 /* Request access to the flash interface. */
3424                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3425                         goto nvram_write_end;
3426
3427                 /* Enable access to flash interface */
3428                 bnx2_enable_nvram_access(bp);
3429
3430                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3431                 if (bp->flash_info->buffered == 0) {
3432                         int j;
3433
3434                         /* Read the whole page into the buffer
3435                          * (non-buffer flash only) */
3436                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3437                                 if (j == (bp->flash_info->page_size - 4)) {
3438                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3439                                 }
3440                                 rc = bnx2_nvram_read_dword(bp,
3441                                         page_start + j,
3442                                         &flash_buffer[j],
3443                                         cmd_flags);
3444
3445                                 if (rc)
3446                                         goto nvram_write_end;
3447
3448                                 cmd_flags = 0;
3449                         }
3450                 }
3451
3452                 /* Enable writes to flash interface (unlock write-protect) */
3453                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3454                         goto nvram_write_end;
3455
3456                 /* Loop to write back the buffer data from page_start to
3457                  * data_start */
3458                 i = 0;
3459                 if (bp->flash_info->buffered == 0) {
3460                         /* Erase the page */
3461                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3462                                 goto nvram_write_end;
3463
3464                         /* Re-enable the write again for the actual write */
3465                         bnx2_enable_nvram_write(bp);
3466
3467                         for (addr = page_start; addr < data_start;
3468                                 addr += 4, i += 4) {
3469
3470                                 rc = bnx2_nvram_write_dword(bp, addr,
3471                                         &flash_buffer[i], cmd_flags);
3472
3473                                 if (rc != 0)
3474                                         goto nvram_write_end;
3475
3476                                 cmd_flags = 0;
3477                         }
3478                 }
3479
3480                 /* Loop to write the new data from data_start to data_end */
3481                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3482                         if ((addr == page_end - 4) ||
3483                                 ((bp->flash_info->buffered) &&
3484                                  (addr == data_end - 4))) {
3485
3486                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3487                         }
3488                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3489                                 cmd_flags);
3490
3491                         if (rc != 0)
3492                                 goto nvram_write_end;
3493
3494                         cmd_flags = 0;
3495                         buf += 4;
3496                 }
3497
3498                 /* Loop to write back the buffer data from data_end
3499                  * to page_end */
3500                 if (bp->flash_info->buffered == 0) {
3501                         for (addr = data_end; addr < page_end;
3502                                 addr += 4, i += 4) {
3503
3504                                 if (addr == page_end-4) {
3505                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3506                                 }
3507                                 rc = bnx2_nvram_write_dword(bp, addr,
3508                                         &flash_buffer[i], cmd_flags);
3509
3510                                 if (rc != 0)
3511                                         goto nvram_write_end;
3512
3513                                 cmd_flags = 0;
3514                         }
3515                 }
3516
3517                 /* Disable writes to flash interface (lock write-protect) */
3518                 bnx2_disable_nvram_write(bp);
3519
3520                 /* Disable access to flash interface */
3521                 bnx2_disable_nvram_access(bp);
3522                 bnx2_release_nvram_lock(bp);
3523
3524                 /* Increment written */
3525                 written += data_end - data_start;
3526         }
3527
3528 nvram_write_end:
3529         kfree(flash_buffer);
3530         kfree(align_buf);
3531         return rc;
3532 }
3533
3534 static int
3535 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3536 {
3537         u32 val;
3538         int i, rc = 0;
3539
3540         /* Wait for the current PCI transaction to complete before
3541          * issuing a reset. */
3542         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3543                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3544                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3545                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3546                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3547         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3548         udelay(5);
3549
3550         /* Wait for the firmware to tell us it is ok to issue a reset. */
3551         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3552
3553         /* Deposit a driver reset signature so the firmware knows that
3554          * this is a soft reset. */
3555         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3556                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3557
3558         /* Do a dummy read to force the chip to complete all current transaction
3559          * before we issue a reset. */
3560         val = REG_RD(bp, BNX2_MISC_ID);
3561
3562         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3563                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3564                 REG_RD(bp, BNX2_MISC_COMMAND);
3565                 udelay(5);
3566
3567                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3568                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3569
3570                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3571
3572         } else {
3573                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3574                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3575                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3576
3577                 /* Chip reset. */
3578                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3579
3580                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3581                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3582                         current->state = TASK_UNINTERRUPTIBLE;
3583                         schedule_timeout(HZ / 50);
3584                 }
3585
3586                 /* Reset takes approximate 30 usec */
3587                 for (i = 0; i < 10; i++) {
3588                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3589                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3590                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3591                                 break;
3592                         udelay(10);
3593                 }
3594
3595                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3596                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3597                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3598                         return -EBUSY;
3599                 }
3600         }
3601
3602         /* Make sure byte swapping is properly configured. */
3603         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3604         if (val != 0x01020304) {
3605                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3606                 return -ENODEV;
3607         }
3608
3609         /* Wait for the firmware to finish its initialization. */
3610         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3611         if (rc)
3612                 return rc;
3613
3614         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3615                 /* Adjust the voltage regular to two steps lower.  The default
3616                  * of this register is 0x0000000e. */
3617                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3618
3619                 /* Remove bad rbuf memory from the free pool. */
3620                 rc = bnx2_alloc_bad_rbuf(bp);
3621         }
3622
3623         return rc;
3624 }
3625
/* Program the chip after a reset: DMA configuration, block enables,
 * on-chip CPU firmware, MAC address, MTU, and host-coalescing (HC)
 * parameters.  Finishes with a WAIT2 firmware sync and enables all
 * blocks.  Returns 0 on success or an error from CPU init / fw sync.
 * Register write order follows the hardware bring-up sequence; do not
 * reorder. */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Build the DMA_CONFIG value: data/control swap modes plus the
	 * number of read and write DMA channels. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA tuning bits (fields at bits
	 * 20 and 11); meaning not visible here — confirm against the
	 * register spec before changing. */
	val |= (0x2 << 20) | (1 << 11);

	/* Extra tuning bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA only on non-PCI-X 5706 chips newer than A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the
	 * PCI-X command register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the context block before initializing contexts below. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Configure the mailbox queue kernel-bypass block size. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window covers all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks, split into
	 * low/high 32-bit halves. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: each register packs the
	 * during-interrupt value in the high half and the normal value
	 * in the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 workaround: timer modes unusable, stats only. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Remember whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final firmware handshake for this reset sequence. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; posting read flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later coalesce-now pokes. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3793
/* Write the L2 TX context for connection @cid: context type, command
 * type, and the 64-bit DMA address of the TX BD ring.  The 5709 uses a
 * different set of context offsets (the *_XI variants) than older
 * chips, but the values written are the same. */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	/* Select the per-chip context field offsets. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* NOTE(review): the (8 << 16) field looks like a BD-related
	 * size/count parameter — confirm against the context layout. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* TX BD ring base address, high then low 32 bits. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
3822
/* Initialize the TX BD ring and its software state, then program the
 * chip's TX context.  Must run with the hardware quiesced (called from
 * bnx2_reset_nic() after a chip reset). */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Re-wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD in the page is a chain pointer back to the start
	 * of the ring, not a packet descriptor. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer bookkeeping. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the mailbox addresses used to ring the TX doorbell. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3847
/* Initialize the RX BD ring pages, chain them together, program the RX
 * context, pre-fill the ring with receive skbs, and tell the chip about
 * them via the producer mailbox. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	/* Reset producer/consumer bookkeeping. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Fill every usable BD in this page with the buffer
		 * length and start/end flags. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The final BD of each page chains to the next page;
		 * the last page wraps back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context: BD-chain type/size plus an extra
	 * field at bits 8-15 (value 0x02). */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* RX BD ring base address, high then low 32 bits. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post as many receive buffers as we can allocate, up to the
	 * requested ring size. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Ring the RX producer doorbell: index then byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3907
3908 static void
3909 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3910 {
3911         u32 num_rings, max;
3912
3913         bp->rx_ring_size = size;
3914         num_rings = 1;
3915         while (size > MAX_RX_DESC_CNT) {
3916                 size -= MAX_RX_DESC_CNT;
3917                 num_rings++;
3918         }
3919         /* round to next power of 2 */
3920         max = MAX_RX_RINGS;
3921         while ((max & num_rings) == 0)
3922                 max >>= 1;
3923
3924         if (num_rings != max)
3925                 max <<= 1;
3926
3927         bp->rx_max_ring = max;
3928         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3929 }
3930
/* Unmap and free every skb still sitting in the TX ring.  Used when
 * tearing the ring down; there is no completion processing here, the
 * packets are simply dropped. */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* i is advanced inside the loop body by the number of BD slots
	 * each skb consumed (head + fragments). */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot — move to the next one. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment; fragments occupy the BD
		 * slots immediately after the head. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot plus all fragment slots
		 * (j == nr_frags after the loop). */
		i += j + 1;
	}

}
3967
/* Unmap and free every skb still posted in the RX ring. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	/* NOTE(review): the bound is '< rx_max_ring_idx' (exclusive),
	 * so the very last index is never visited; that slot appears to
	 * correspond to a chain BD that never carries an skb — confirm
	 * against the ring layout in bnx2_init_rx_ring(). */
	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		/* Undo the DMA mapping created when the skb was posted. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
3991
/* Release every skb held by the driver: TX ring first, then RX ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3998
/* Reset the chip and rebuild its state: re-init the chip and both
 * rings.  Returns 0 on success or the first error encountered. */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Free the old buffers even if the reset failed, so we never
	 * leak skbs that the (now reset) hardware can no longer use. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4016
/* Full NIC initialization: reset/rebuild the chip, then bring up the
 * PHY and report link state.  Returns 0 on success or the error from
 * bnx2_reset_nic(). */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY setup must be serialized under phy_lock; bnx2_set_link()
	 * is called outside the lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
4031
/* Self-test chip registers.  For each table entry, write 0 and then
 * all-ones to the register and verify that read/write bits (rw_mask)
 * take the written value while read-only bits (ro_mask) are unchanged.
 * The original register value is restored in all cases.  Entries marked
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success or
 * -ENODEV on the first register that misbehaves. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { register offset, flags, writable-bit mask, read-only-bit
	 * mask }; terminated by offset 0xffff. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the current value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back as set, ro
		 * bits still unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4202
4203 static int
4204 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4205 {
4206         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4207                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4208         int i;
4209
4210         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4211                 u32 offset;
4212
4213                 for (offset = 0; offset < size; offset += 4) {
4214
4215                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4216
4217                         if (REG_RD_IND(bp, start + offset) !=
4218                                 test_pattern[i]) {
4219                                 return -ENODEV;
4220                         }
4221                 }
4222         }
4223         return 0;
4224 }
4225
4226 static int
4227 bnx2_test_memory(struct bnx2 *bp)
4228 {
4229         int ret = 0;
4230         int i;
4231         static struct mem_entry {
4232                 u32   offset;
4233                 u32   len;
4234         } mem_tbl_5706[] = {
4235                 { 0x60000,  0x4000 },
4236                 { 0xa0000,  0x3000 },
4237                 { 0xe0000,  0x4000 },
4238                 { 0x120000, 0x4000 },
4239                 { 0x1a0000, 0x4000 },
4240                 { 0x160000, 0x4000 },
4241                 { 0xffffffff, 0    },
4242         },
4243         mem_tbl_5709[] = {
4244                 { 0x60000,  0x4000 },
4245                 { 0xa0000,  0x3000 },
4246                 { 0xe0000,  0x4000 },
4247                 { 0x120000, 0x4000 },
4248                 { 0x1a0000, 0x4000 },
4249                 { 0xffffffff, 0    },
4250         };
4251         struct mem_entry *mem_tbl;
4252
4253         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4254                 mem_tbl = mem_tbl_5709;
4255         else
4256                 mem_tbl = mem_tbl_5706;
4257
4258         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4259                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4260                         mem_tbl[i].len)) != 0) {
4261                         return ret;
4262                 }
4263         }
4264
4265         return ret;
4266 }
4267
4268 #define BNX2_MAC_LOOPBACK       0
4269 #define BNX2_PHY_LOOPBACK       1
4270
/* Send one self-addressed frame through the selected internal loopback
 * path (MAC or PHY) and verify it comes back intact on the RX ring.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, and -ENODEV for any TX/RX index
 * mismatch, hardware-flagged frame error, or payload corruption.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then an incrementing byte pattern.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post the frame as a single TX buffer descriptor. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell (producer index + byte sequence). */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The chip must have consumed exactly our one TX descriptor. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on the RX ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware-written l2_fhdr sits at the head of the buffer;
	 * the frame data starts rx_offset bytes in.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Fail if hardware flagged any receive error on the frame. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check; the reported length appears to include a 4-byte
	 * CRC, hence the "- 4".
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4389
4390 #define BNX2_MAC_LOOPBACK_FAILED        1
4391 #define BNX2_PHY_LOOPBACK_FAILED        2
4392 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4393                                          BNX2_PHY_LOOPBACK_FAILED)
4394
4395 static int
4396 bnx2_test_loopback(struct bnx2 *bp)
4397 {
4398         int rc = 0;
4399
4400         if (!netif_running(bp->dev))
4401                 return BNX2_LOOPBACK_FAILED;
4402
4403         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4404         spin_lock_bh(&bp->phy_lock);
4405         bnx2_init_phy(bp);
4406         spin_unlock_bh(&bp->phy_lock);
4407         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4408                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4409         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4410                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4411         return rc;
4412 }
4413
4414 #define NVRAM_SIZE 0x200
4415 #define CRC32_RESIDUAL 0xdebb20e3
4416
4417 static int
4418 bnx2_test_nvram(struct bnx2 *bp)
4419 {
4420         u32 buf[NVRAM_SIZE / 4];
4421         u8 *data = (u8 *) buf;
4422         int rc = 0;
4423         u32 magic, csum;
4424
4425         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4426                 goto test_nvram_done;
4427
4428         magic = be32_to_cpu(buf[0]);
4429         if (magic != 0x669955aa) {
4430                 rc = -ENODEV;
4431                 goto test_nvram_done;
4432         }
4433
4434         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4435                 goto test_nvram_done;
4436
4437         csum = ether_crc_le(0x100, data);
4438         if (csum != CRC32_RESIDUAL) {
4439                 rc = -ENODEV;
4440                 goto test_nvram_done;
4441         }
4442
4443         csum = ether_crc_le(0x100, data + 0x100);
4444         if (csum != CRC32_RESIDUAL) {
4445                 rc = -ENODEV;
4446         }
4447
4448 test_nvram_done:
4449         return rc;
4450 }
4451
4452 static int
4453 bnx2_test_link(struct bnx2 *bp)
4454 {
4455         u32 bmsr;
4456
4457         spin_lock_bh(&bp->phy_lock);
4458         bnx2_enable_bmsr1(bp);
4459         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4460         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4461         bnx2_disable_bmsr1(bp);
4462         spin_unlock_bh(&bp->phy_lock);
4463
4464         if (bmsr & BMSR_LSTATUS) {
4465                 return 0;
4466         }
4467         return -ENODEV;
4468 }
4469
4470 static int
4471 bnx2_test_intr(struct bnx2 *bp)
4472 {
4473         int i;
4474         u16 status_idx;
4475
4476         if (!netif_running(bp->dev))
4477                 return -ENODEV;
4478
4479         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4480
4481         /* This register is not touched during run-time. */
4482         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4483         REG_RD(bp, BNX2_HC_COMMAND);
4484
4485         for (i = 0; i < 10; i++) {
4486                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4487                         status_idx) {
4488
4489                         break;
4490                 }
4491
4492                 msleep_interruptible(10);
4493         }
4494         if (i < 10)
4495                 return 0;
4496
4497         return -ENODEV;
4498 }
4499
/* Periodic SerDes link maintenance for the 5706, called from
 * bnx2_timer(); takes phy_lock.  Implements parallel detection: if
 * autoneg is enabled but the link stays down while a signal is present
 * and no autoneg CONFIG data is seen, force 1000/full; once link is up
 * and the partner starts sending CONFIG again, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;	/* still waiting out a pending autoneg attempt */
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific PHY registers; 0x17/0x15 appear
			 * to be an address/data pair and 0x1c a shadow
			 * select -- see the PHY datasheet to confirm.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000/full and remember we did so.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in parallel-detect mode; if the partner now
		 * sends CONFIG, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4554
/* Periodic SerDes link maintenance for the 5708, called from
 * bnx2_timer().  Only active on 2.5G-capable PHYs: while autoneg is
 * enabled but the link stays down, alternate between forced 2.5G mode
 * and autonegotiation so a non-negotiating partner can still link up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do on PHYs without 2.5G support. */
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;	/* skip ticks while a retry is pending */
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not brought the link up: switch to
			 * forced 2.5G and use the forced-mode interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either: fall back to
			 * autoneg and wait two ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4584
4585 static void
4586 bnx2_timer(unsigned long data)
4587 {
4588         struct bnx2 *bp = (struct bnx2 *) data;
4589         u32 msg;
4590
4591         if (!netif_running(bp->dev))
4592                 return;
4593
4594         if (atomic_read(&bp->intr_sem) != 0)
4595                 goto bnx2_restart_timer;
4596
4597         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4598         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4599
4600         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4601
4602         if (bp->phy_flags & PHY_SERDES_FLAG) {
4603                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4604                         bnx2_5706_serdes_timer(bp);
4605                 else
4606                         bnx2_5708_serdes_timer(bp);
4607         }
4608
4609 bnx2_restart_timer:
4610         mod_timer(&bp->timer, jiffies + bp->current_interval);
4611 }
4612
/* Called with rtnl_lock */
/* net_device open hook: allocate rings, grab the IRQ (MSI when the
 * silicon and module parameters allow, shared INTx otherwise), bring
 * the NIC up, verify MSI delivery, and start the TX queue.
 * Returns 0 on success or a negative errno with everything unwound.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is avoided on 5706 A0/A1 and when disable_msi is set;
	 * fall back to shared INTx when pci_enable_msi() fails.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init the chip and re-request the IRQ as INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4710
/* Workqueue handler scheduled by bnx2_tx_timeout(): quiesce the
 * interface, re-initialize the chip, and restart.  in_reset_task lets
 * bnx2_close() poll for completion instead of flushing the workqueue,
 * which could deadlock under rtnl_lock (see the comment in
 * bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem appears to hold off interrupt handling
	 * until bnx2_netif_start() re-enables it -- confirm against those
	 * helpers.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4728
/* net_device tx_timeout hook: defer the recovery reset to process
 * context via reset_task.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4737
4738 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach (or detach, when vlgrp is NULL) the VLAN group and reprogram
 * the RX mode to match, with the interface quiesced meanwhile.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4752
/* Called with rtnl_lock */
/* Remove the VLAN device for the given vid from the group and
 * reprogram the RX mode, with the interface quiesced meanwhile.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4765 #endif
4766
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* hard_start_xmit hook: map the skb (linear part plus page fragments)
 * onto consecutive TX buffer descriptors, encode checksum/VLAN/TSO
 * flags, and ring the doorbell.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY only in the "ring full while awake" bug case.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring could
	 * fill, so reaching this is a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* TSO: encode MSS and header-length hints into the BD fields. */
	if ((mss = skb_shinfo(skb)->gso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: the TCP header offset beyond the fixed
			 * IPv6 header is spread across several BD flag
			 * fields in 8-byte units.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: the headers are modified below, so
			 * unclone first, then prime the IP total length
			 * and the TCP pseudo-header checksum.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Extra IP/TCP option length, in 32-bit words. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD covers the linear part of the skb. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the frame. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if
	 * enough room reappeared (bnx2_tx_int may have run meanwhile).
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4906
/* Called with rtnl_lock */
/* net_device stop hook: wait out any in-flight reset_task, quiesce the
 * device, tell the firmware why we are going down (WoL dependent),
 * release the IRQ and memory, and drop the chip into D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware shutdown message based on wake-on-LAN state. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Low-power state until the device is opened again. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4942
/* Read a 64-bit hardware counter made of _hi/_lo halves.  The whole
 * expansion is parenthesized so the macro is safe inside larger
 * expressions (the previous form bound operators like '*' to the low
 * half only).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit, only the low half fits in an unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4955
/* net_device get_stats hook: translate the chip's statistics block
 * into struct net_device_stats.  Returns the cached (unmodified) stats
 * when the statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* 64-bit counters are split into _hi/_lo halves; GET_NET_STATS
	 * folds them according to BITS_PER_LONG.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0 --
	 * presumably the counter is unreliable there; check chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is refreshed periodically by bnx2_timer(). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5031
5032 /* All ethtool functions called with rtnl_lock */
5033
5034 static int
5035 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5036 {
5037         struct bnx2 *bp = netdev_priv(dev);
5038
5039         cmd->supported = SUPPORTED_Autoneg;
5040         if (bp->phy_flags & PHY_SERDES_FLAG) {
5041                 cmd->supported |= SUPPORTED_1000baseT_Full |
5042                         SUPPORTED_FIBRE;
5043                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5044                         cmd->supported |= SUPPORTED_2500baseX_Full;
5045
5046                 cmd->port = PORT_FIBRE;
5047         }
5048         else {
5049                 cmd->supported |= SUPPORTED_10baseT_Half |
5050                         SUPPORTED_10baseT_Full |
5051                         SUPPORTED_100baseT_Half |
5052                         SUPPORTED_100baseT_Full |
5053                         SUPPORTED_1000baseT_Full |
5054                         SUPPORTED_TP;
5055
5056                 cmd->port = PORT_TP;
5057         }
5058
5059         cmd->advertising = bp->advertising;
5060
5061         if (bp->autoneg & AUTONEG_SPEED) {
5062                 cmd->autoneg = AUTONEG_ENABLE;
5063         }
5064         else {
5065                 cmd->autoneg = AUTONEG_DISABLE;
5066         }
5067
5068         if (netif_carrier_ok(dev)) {
5069                 cmd->speed = bp->line_speed;
5070                 cmd->duplex = bp->duplex;
5071         }
5072         else {
5073                 cmd->speed = -1;
5074                 cmd->duplex = -1;
5075         }
5076
5077         cmd->transceiver = XCVR_INTERNAL;
5078         cmd->phy_address = bp->phy_addr;
5079
5080         return 0;
5081 }
5082
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool .set_settings handler: validate the requested
	 * autoneg/speed/duplex combination against the PHY type
	 * (copper vs. SerDes), then program the PHY.
	 * Returns 0 on success or -EINVAL for unsupported requests.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	/* Stage the new configuration in locals so *bp is only updated
	 * after all validation below has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds only make sense on copper PHYs. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* NOTE(review): this branch only validates the
			 * capability; 'advertising' keeps its previous
			 * value here — confirm this is intentional.
			 */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000Half is not supported. */
			return -EINVAL;
		}
		else {
			/* Any other mask: advertise everything the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex (autoneg off). */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes supports only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 is rejected on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings and reprogram
	 * the PHY under the PHY lock.
	 */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5160
5161 static void
5162 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5163 {
5164         struct bnx2 *bp = netdev_priv(dev);
5165
5166         strcpy(info->driver, DRV_MODULE_NAME);
5167         strcpy(info->version, DRV_MODULE_VERSION);
5168         strcpy(info->bus_info, pci_name(bp->pdev));
5169         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5170         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5171         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5172         info->fw_version[1] = info->fw_version[3] = '.';
5173         info->fw_version[5] = 0;
5174 }
5175
/* Size of the register dump buffer filled by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool .get_regs_len handler: buffer size needed for a register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5183
5184 static void
5185 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5186 {
5187         u32 *p = _p, i, offset;
5188         u8 *orig_p = _p;
5189         struct bnx2 *bp = netdev_priv(dev);
5190         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5191                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5192                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5193                                  0x1040, 0x1048, 0x1080, 0x10a4,
5194                                  0x1400, 0x1490, 0x1498, 0x14f0,
5195                                  0x1500, 0x155c, 0x1580, 0x15dc,
5196                                  0x1600, 0x1658, 0x1680, 0x16d8,
5197                                  0x1800, 0x1820, 0x1840, 0x1854,
5198                                  0x1880, 0x1894, 0x1900, 0x1984,
5199                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5200                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5201                                  0x2000, 0x2030, 0x23c0, 0x2400,
5202                                  0x2800, 0x2820, 0x2830, 0x2850,
5203                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5204                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5205                                  0x4080, 0x4090, 0x43c0, 0x4458,
5206                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5207                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5208                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5209                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5210                                  0x6800, 0x6848, 0x684c, 0x6860,
5211                                  0x6888, 0x6910, 0x8000 };
5212
5213         regs->version = 0;
5214
5215         memset(p, 0, BNX2_REGDUMP_LEN);
5216
5217         if (!netif_running(bp->dev))
5218                 return;
5219
5220         i = 0;
5221         offset = reg_boundaries[0];
5222         p += offset;
5223         while (offset < BNX2_REGDUMP_LEN) {
5224                 *p++ = REG_RD(bp, offset);
5225                 offset += 4;
5226                 if (offset == reg_boundaries[i + 1]) {
5227                         offset = reg_boundaries[i + 2];
5228                         p = (u32 *) (orig_p + offset);
5229                         i += 2;
5230                 }
5231         }
5232 }
5233
5234 static void
5235 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5236 {
5237         struct bnx2 *bp = netdev_priv(dev);
5238
5239         if (bp->flags & NO_WOL_FLAG) {
5240                 wol->supported = 0;
5241                 wol->wolopts = 0;
5242         }
5243         else {
5244                 wol->supported = WAKE_MAGIC;
5245                 if (bp->wol)
5246                         wol->wolopts = WAKE_MAGIC;
5247                 else
5248                         wol->wolopts = 0;
5249         }
5250         memset(&wol->sopass, 0, sizeof(wol->sopass));
5251 }
5252
5253 static int
5254 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5255 {
5256         struct bnx2 *bp = netdev_priv(dev);
5257
5258         if (wol->wolopts & ~WAKE_MAGIC)
5259                 return -EINVAL;
5260
5261         if (wol->wolopts & WAKE_MAGIC) {
5262                 if (bp->flags & NO_WOL_FLAG)
5263                         return -EINVAL;
5264
5265                 bp->wol = 1;
5266         }
5267         else {
5268                 bp->wol = 0;
5269         }
5270         return 0;
5271 }
5272
static int
bnx2_nway_reset(struct net_device *dev)
{
	/* ethtool .nway_reset handler: restart autonegotiation.
	 * Only valid when autoneg is enabled.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; msleep() must not be
		 * called with bottom halves disabled.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handled by the driver
		 * timer.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5307
5308 static int
5309 bnx2_get_eeprom_len(struct net_device *dev)
5310 {
5311         struct bnx2 *bp = netdev_priv(dev);
5312
5313         if (bp->flash_info == NULL)
5314                 return 0;
5315
5316         return (int) bp->flash_size;
5317 }
5318
5319 static int
5320 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5321                 u8 *eebuf)
5322 {
5323         struct bnx2 *bp = netdev_priv(dev);
5324         int rc;
5325
5326         /* parameters already validated in ethtool_get_eeprom */
5327
5328         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5329
5330         return rc;
5331 }
5332
5333 static int
5334 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5335                 u8 *eebuf)
5336 {
5337         struct bnx2 *bp = netdev_priv(dev);
5338         int rc;
5339
5340         /* parameters already validated in ethtool_set_eeprom */
5341
5342         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5343
5344         return rc;
5345 }
5346
5347 static int
5348 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5349 {
5350         struct bnx2 *bp = netdev_priv(dev);
5351
5352         memset(coal, 0, sizeof(struct ethtool_coalesce));
5353
5354         coal->rx_coalesce_usecs = bp->rx_ticks;
5355         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5356         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5357         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5358
5359         coal->tx_coalesce_usecs = bp->tx_ticks;
5360         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5361         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5362         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5363
5364         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5365
5366         return 0;
5367 }
5368
5369 static int
5370 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5371 {
5372         struct bnx2 *bp = netdev_priv(dev);
5373
5374         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5375         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5376
5377         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5378         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5379
5380         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5381         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5382
5383         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5384         if (bp->rx_quick_cons_trip_int > 0xff)
5385                 bp->rx_quick_cons_trip_int = 0xff;
5386
5387         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5388         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5389
5390         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5391         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5392
5393         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5394         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5395
5396         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5397         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5398                 0xff;
5399
5400         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5401         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5402         bp->stats_ticks &= 0xffff00;
5403
5404         if (netif_running(bp->dev)) {
5405                 bnx2_netif_stop(bp);
5406                 bnx2_init_nic(bp);
5407                 bnx2_netif_start(bp);
5408         }
5409
5410         return 0;
5411 }
5412
5413 static void
5414 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5415 {
5416         struct bnx2 *bp = netdev_priv(dev);
5417
5418         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5419         ering->rx_mini_max_pending = 0;
5420         ering->rx_jumbo_max_pending = 0;
5421
5422         ering->rx_pending = bp->rx_ring_size;
5423         ering->rx_mini_pending = 0;
5424         ering->rx_jumbo_pending = 0;
5425
5426         ering->tx_max_pending = MAX_TX_DESC_CNT;
5427         ering->tx_pending = bp->tx_ring_size;
5428 }
5429
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	/* ethtool .set_ringparam handler: resize the RX/TX rings,
	 * tearing down and rebuilding the NIC if it is running.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* The TX ring must keep room for one maximally-fragmented skb. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Stop traffic and free the old rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * with its rings freed but still marked running — confirm
		 * whether the caller is expected to close the device in
		 * that case.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5463
5464 static void
5465 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5466 {
5467         struct bnx2 *bp = netdev_priv(dev);
5468
5469         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5470         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5471         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5472 }
5473
5474 static int
5475 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5476 {
5477         struct bnx2 *bp = netdev_priv(dev);
5478
5479         bp->req_flow_ctrl = 0;
5480         if (epause->rx_pause)
5481                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5482         if (epause->tx_pause)
5483                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5484
5485         if (epause->autoneg) {
5486                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5487         }
5488         else {
5489                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5490         }
5491
5492         spin_lock_bh(&bp->phy_lock);
5493
5494         bnx2_setup_phy(bp);
5495
5496         spin_unlock_bh(&bp->phy_lock);
5497
5498         return 0;
5499 }
5500
5501 static u32
5502 bnx2_get_rx_csum(struct net_device *dev)
5503 {
5504         struct bnx2 *bp = netdev_priv(dev);
5505
5506         return bp->rx_csum;
5507 }
5508
5509 static int
5510 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5511 {
5512         struct bnx2 *bp = netdev_priv(dev);
5513
5514         bp->rx_csum = data;
5515         return 0;
5516 }
5517
5518 static int
5519 bnx2_set_tso(struct net_device *dev, u32 data)
5520 {
5521         struct bnx2 *bp = netdev_priv(dev);
5522
5523         if (data) {
5524                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5525                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5526                         dev->features |= NETIF_F_TSO6;
5527         } else
5528                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5529                                    NETIF_F_TSO_ECN);
5530         return 0;
5531 }
5532
/* Number of ethtool statistics; the string, offset, and length tables
 * below must all stay in sync with this count and with each other.
 */
#define BNX2_NUM_STATS 46

/* ethtool statistics names, in the order the values are reported by
 * bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5585
/* Convert a statistics_block field name into its u32-word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware statistics block, in the
 * same order as bnx2_stats_str_arr.  64-bit counters reference their
 * high word; the low word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5636
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter widths in bytes (0 = skip) for 5706 A0-A2 and 5708 A0. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Counter widths for later chips; only the bad-octets counter (index 1)
 * is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5655
/* Number of self-test results reported by bnx2_self_test(). */
#define BNX2_NUM_TESTS 6

/* Self-test names; the order matches the buf[] indices filled in
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5668
/* ethtool .self_test_count handler: number of u64 results. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5674
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	/* ethtool .self_test handler.  buf[] indices match
	 * bnx2_tests_str_arr: 0=registers, 1=memory, 2=loopback
	 * (offline only), 3=nvram, 4=interrupt, 5=link.  A non-zero
	 * entry marks a failed test.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the hardware:
		 * stop traffic and put the chip into diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or just reset the chip if
		 * the device was closed while we were testing.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5730
5731 static void
5732 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5733 {
5734         switch (stringset) {
5735         case ETH_SS_STATS:
5736                 memcpy(buf, bnx2_stats_str_arr,
5737                         sizeof(bnx2_stats_str_arr));
5738                 break;
5739         case ETH_SS_TEST:
5740                 memcpy(buf, bnx2_tests_str_arr,
5741                         sizeof(bnx2_tests_str_arr));
5742                 break;
5743         }
5744 }
5745
/* ethtool .get_stats_count handler: number of u64 values written by
 * bnx2_get_ethtool_stats().
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5751
5752 static void
5753 bnx2_get_ethtool_stats(struct net_device *dev,
5754                 struct ethtool_stats *stats, u64 *buf)
5755 {
5756         struct bnx2 *bp = netdev_priv(dev);
5757         int i;
5758         u32 *hw_stats = (u32 *) bp->stats_blk;
5759         u8 *stats_len_arr = NULL;
5760
5761         if (hw_stats == NULL) {
5762                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5763                 return;
5764         }
5765
5766         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5767             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5768             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5769             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5770                 stats_len_arr = bnx2_5706_stats_len_arr;
5771         else
5772                 stats_len_arr = bnx2_5708_stats_len_arr;
5773
5774         for (i = 0; i < BNX2_NUM_STATS; i++) {
5775                 if (stats_len_arr[i] == 0) {
5776                         /* skip this counter */
5777                         buf[i] = 0;
5778                         continue;
5779                 }
5780                 if (stats_len_arr[i] == 4) {
5781                         /* 4-byte counter */
5782                         buf[i] = (u64)
5783                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5784                         continue;
5785                 }
5786                 /* 8-byte counter */
5787                 buf[i] = (((u64) *(hw_stats +
5788                                         bnx2_stats_offset_arr[i])) << 32) +
5789                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5790         }
5791 }
5792
5793 static int
5794 bnx2_phys_id(struct net_device *dev, u32 data)
5795 {
5796         struct bnx2 *bp = netdev_priv(dev);
5797         int i;
5798         u32 save;
5799
5800         if (data == 0)
5801                 data = 2;
5802
5803         save = REG_RD(bp, BNX2_MISC_CFG);
5804         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5805
5806         for (i = 0; i < (data * 2); i++) {
5807                 if ((i % 2) == 0) {
5808                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5809                 }
5810                 else {
5811                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5812                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5813                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5814                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5815                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5816                                 BNX2_EMAC_LED_TRAFFIC);
5817                 }
5818                 msleep_interruptible(500);
5819                 if (signal_pending(current))
5820                         break;
5821         }
5822         REG_WR(bp, BNX2_EMAC_LED, 0);
5823         REG_WR(bp, BNX2_MISC_CFG, save);
5824         return 0;
5825 }
5826
5827 static int
5828 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5829 {
5830         struct bnx2 *bp = netdev_priv(dev);
5831
5832         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5833                 return (ethtool_op_set_tx_hw_csum(dev, data));
5834         else
5835                 return (ethtool_op_set_tx_csum(dev, data));
5836 }
5837
/* ethtool entry points: driver-specific handlers above, plus generic
 * ethtool_op_* helpers where the default behavior suffices.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5873
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* MII ioctl handler: report the PHY address and read/write PHY
	 * registers under bp->phy_lock.
	 */
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Reject PHY access while the device is down. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY writes can reconfigure the link; admin only. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5921
5922 /* Called with rtnl_lock */
5923 static int
5924 bnx2_change_mac_addr(struct net_device *dev, void *p)
5925 {
5926         struct sockaddr *addr = p;
5927         struct bnx2 *bp = netdev_priv(dev);
5928
5929         if (!is_valid_ether_addr(addr->sa_data))
5930                 return -EINVAL;
5931
5932         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5933         if (netif_running(dev))
5934                 bnx2_set_mac_addr(bp);
5935
5936         return 0;
5937 }
5938
5939 /* Called with rtnl_lock */
5940 static int
5941 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5942 {
5943         struct bnx2 *bp = netdev_priv(dev);
5944
5945         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5946                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5947                 return -EINVAL;
5948
5949         dev->mtu = new_mtu;
5950         if (netif_running(dev)) {
5951                 bnx2_netif_stop(bp);
5952
5953                 bnx2_init_nic(bp);
5954
5955                 bnx2_netif_start(bp);
5956         }
5957         return 0;
5958 }
5959
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the NIC by invoking the interrupt handler
 * directly with the IRQ line masked.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5971
/* Detect the media type (copper vs. SerDes) on a 5709 by decoding the
 * dual-media control register, and set PHY_SERDES_FLAG accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
        u32 strap;

        /* Bond ID "C": leave phy_flags alone (presumably copper-only
         * parts); bond ID "S": SerDes-only parts.
         */
        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
                return;
        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
                bp->phy_flags |= PHY_SERDES_FLAG;
                return;
        }

        /* Dual-media part: media is chosen by a hardware strap value,
         * which software may override via the PHY_CTRL field.
         */
        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
        else
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

        /* The strap-value-to-media mapping differs between PCI function 0
         * and the other function; the magic values come from the chip
         * documentation.
         */
        if (PCI_FUNC(bp->pdev->devfn) == 0) {
                switch (strap) {
                case 0x4:
                case 0x5:
                case 0x6:
                        bp->phy_flags |= PHY_SERDES_FLAG;
                        return;
                }
        } else {
                switch (strap) {
                case 0x1:
                case 0x2:
                case 0x4:
                        bp->phy_flags |= PHY_SERDES_FLAG;
                        return;
                }
        }
}
6009
6010 static int __devinit
6011 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6012 {
6013         struct bnx2 *bp;
6014         unsigned long mem_len;
6015         int rc;
6016         u32 reg;
6017         u64 dma_mask, persist_dma_mask;
6018
6019         SET_MODULE_OWNER(dev);
6020         SET_NETDEV_DEV(dev, &pdev->dev);
6021         bp = netdev_priv(dev);
6022
6023         bp->flags = 0;
6024         bp->phy_flags = 0;
6025
6026         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6027         rc = pci_enable_device(pdev);
6028         if (rc) {
6029                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6030                 goto err_out;
6031         }
6032
6033         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6034                 dev_err(&pdev->dev,
6035                         "Cannot find PCI device base address, aborting.\n");
6036                 rc = -ENODEV;
6037                 goto err_out_disable;
6038         }
6039
6040         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6041         if (rc) {
6042                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6043                 goto err_out_disable;
6044         }
6045
6046         pci_set_master(pdev);
6047
6048         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6049         if (bp->pm_cap == 0) {
6050                 dev_err(&pdev->dev,
6051                         "Cannot find power management capability, aborting.\n");
6052                 rc = -EIO;
6053                 goto err_out_release;
6054         }
6055
6056         bp->dev = dev;
6057         bp->pdev = pdev;
6058
6059         spin_lock_init(&bp->phy_lock);
6060         spin_lock_init(&bp->indirect_lock);
6061         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6062
6063         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6064         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6065         dev->mem_end = dev->mem_start + mem_len;
6066         dev->irq = pdev->irq;
6067
6068         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6069
6070         if (!bp->regview) {
6071                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6072                 rc = -ENOMEM;
6073                 goto err_out_release;
6074         }
6075
6076         /* Configure byte swap and enable write to the reg_window registers.
6077          * Rely on CPU to do target byte swapping on big endian systems
6078          * The chip's target access swapping will not swap all accesses
6079          */
6080         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6081                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6082                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6083
6084         bnx2_set_power_state(bp, PCI_D0);
6085
6086         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6087
6088         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6089                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6090                 if (bp->pcix_cap == 0) {
6091                         dev_err(&pdev->dev,
6092                                 "Cannot find PCIX capability, aborting.\n");
6093                         rc = -EIO;
6094                         goto err_out_unmap;
6095                 }
6096         }
6097
6098         /* 5708 cannot support DMA addresses > 40-bit.  */
6099         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6100                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6101         else
6102                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6103
6104         /* Configure DMA attributes. */
6105         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6106                 dev->features |= NETIF_F_HIGHDMA;
6107                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6108                 if (rc) {
6109                         dev_err(&pdev->dev,
6110                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6111                         goto err_out_unmap;
6112                 }
6113         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6114                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6115                 goto err_out_unmap;
6116         }
6117
6118         /* Get bus information. */
6119         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6120         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6121                 u32 clkreg;
6122
6123                 bp->flags |= PCIX_FLAG;
6124
6125                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6126
6127                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6128                 switch (clkreg) {
6129                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6130                         bp->bus_speed_mhz = 133;
6131                         break;
6132
6133                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6134                         bp->bus_speed_mhz = 100;
6135                         break;
6136
6137                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6138                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6139                         bp->bus_speed_mhz = 66;
6140                         break;
6141
6142                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6143                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6144                         bp->bus_speed_mhz = 50;
6145                         break;
6146
6147                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6148                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6149                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6150                         bp->bus_speed_mhz = 33;
6151                         break;
6152                 }
6153         }
6154         else {
6155                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6156                         bp->bus_speed_mhz = 66;
6157                 else
6158                         bp->bus_speed_mhz = 33;
6159         }
6160
6161         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6162                 bp->flags |= PCI_32BIT_FLAG;
6163
6164         /* 5706A0 may falsely detect SERR and PERR. */
6165         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6166                 reg = REG_RD(bp, PCI_COMMAND);
6167                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6168                 REG_WR(bp, PCI_COMMAND, reg);
6169         }
6170         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6171                 !(bp->flags & PCIX_FLAG)) {
6172
6173                 dev_err(&pdev->dev,
6174                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6175                 goto err_out_unmap;
6176         }
6177
6178         bnx2_init_nvram(bp);
6179
6180         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6181
6182         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6183        &n