[BNX2]: Seems to not need net/tcp.h
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/zlib.h>
49
50 #include "bnx2.h"
51 #include "bnx2_fw.h"
52 #include "bnx2_fw2.h"
53
54 #define DRV_MODULE_NAME         "bnx2"
55 #define PFX DRV_MODULE_NAME     ": "
56 #define DRV_MODULE_VERSION      "1.6.2"
57 #define DRV_MODULE_RELDATE      "July 6, 2007"
58
59 #define RUN_AT(x) (jiffies + (x))
60
61 /* Time in jiffies before concluding the transmitter is hung. */
62 #define TX_TIMEOUT  (5*HZ)
63
64 static const char version[] __devinitdata =
65         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
66
67 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
68 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_MODULE_VERSION);
71
72 static int disable_msi = 0;
73
74 module_param(disable_msi, int, 0);
75 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
76
/* Board identifiers; used as the driver_data index into board_info[]
 * via the PCI device table below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM variant of the 5706 */
	NC370I,		/* HP OEM variant of the 5706 */
	BCM5706S,	/* SerDes version of the 5706 */
	NC370F,		/* HP OEM variant of the 5706S */
	BCM5708,
	BCM5708S,	/* SerDes version of the 5708 */
	BCM5709,
	BCM5709S,	/* SerDes version of the 5709 */
} board_t;
88
/* Human-readable board names, indexed by board_t above.  The order of
 * these entries must match the board_t enum exactly.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
103
/* PCI ID table.  The HP subsystem-ID entries must precede the generic
 * PCI_ANY_ID entries for the same device ID so that the OEM boards are
 * matched first; the last field is the board_t driver_data index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
125
/* NVRAM descriptor table.  Each struct flash_spec entry holds the
 * hardware strapping pattern and configuration register values for one
 * supported EEPROM/flash part, followed by buffered-flag, page
 * geometry, byte-address mask, total size and a display name.
 * NOTE(review): exact field semantics come from struct flash_spec in
 * bnx2.h and the NetXtreme II NVRAM registers — confirm there before
 * editing any magic value.  "Expansion entry" rows are placeholders
 * for strapping codes with no known part.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
212
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
215 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216 {
217         u32 diff;
218
219         smp_mb();
220
221         /* The ring uses 256 indices for 255 entries, one of them
222          * needs to be skipped.
223          */
224         diff = bp->tx_prod - bp->tx_cons;
225         if (unlikely(diff >= TX_DESC_CNT)) {
226                 diff &= 0xffff;
227                 if (diff == TX_DESC_CNT)
228                         diff = MAX_TX_DESC_CNT;
229         }
230         return (bp->tx_ring_size - diff);
231 }
232
/* Indirectly read a device register through the PCICFG register
 * window.  The window address/data register pair is a shared resource,
 * so the two accesses are serialized under indirect_lock (BH-disabling
 * variant — presumably because callers can run in softirq context;
 * TODO confirm against call sites).
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
244
/* Indirectly write a device register through the PCICFG register
 * window; counterpart of bnx2_reg_rd_ind().  Serialized under
 * indirect_lock because the window register pair is shared.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
253
254 static void
255 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
256 {
257         offset += cid_addr;
258         spin_lock_bh(&bp->indirect_lock);
259         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
260                 int i;
261
262                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
263                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
264                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
265                 for (i = 0; i < 5; i++) {
266                         u32 val;
267                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
268                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
269                                 break;
270                         udelay(5);
271                 }
272         } else {
273                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
274                 REG_WR(bp, BNX2_CTX_DATA, val);
275         }
276         spin_unlock_bh(&bp->indirect_lock);
277 }
278
/* Read PHY register @reg over the MDIO bus into *@val.
 *
 * If hardware auto-polling of the PHY is enabled, it is temporarily
 * turned off around the manual MDIO transaction (the mode register is
 * read back and a 40us delay applied to let the change take effect)
 * and re-enabled afterwards.
 *
 * Returns 0 on success; -EBUSY (with *@val zeroed) if the MDIO
 * START_BUSY bit never clears within the ~500us poll window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Build the MDIO read command: PHY address, register, and the
	 * START_BUSY bit to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data portion. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
335
/* Write @val to PHY register @reg over the MDIO bus.
 *
 * Mirrors bnx2_read_phy(): hardware auto-polling is suspended around
 * the manual transaction and restored afterwards.  Returns 0 on
 * success, -EBUSY if the MDIO START_BUSY bit never clears within the
 * ~500us poll window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* MDIO write command: PHY address, register, data, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
384
/* Mask device interrupts.  The read-back of INT_ACK_CMD flushes the
 * posted write so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
392
/* Unmask device interrupts and acknowledge up to last_status_idx.
 *
 * Two INT_ACK_CMD writes are issued: first with MASK_INT still set,
 * then without it — NOTE(review): this two-step sequence appears to be
 * a deliberate hardware requirement of this chip family; do not
 * collapse it into a single write without consulting the NetXtreme II
 * documentation.  The final COAL_NOW command forces a host-coalescing
 * pass so any events that arrived while masked are processed.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
405
/* Disable interrupts and wait for any in-flight interrupt handler to
 * finish.  intr_sem is bumped first so the handler (and
 * bnx2_netif_start) knows interrupts are administratively off.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
413
/* Quiesce the interface: interrupts off (synchronously), NAPI polling
 * and the TX queue stopped.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
424
425 static void
426 bnx2_netif_start(struct bnx2 *bp)
427 {
428         if (atomic_dec_and_test(&bp->intr_sem)) {
429                 if (netif_running(bp->dev)) {
430                         netif_wake_queue(bp->dev);
431                         netif_poll_enable(bp->dev);
432                         bnx2_enable_int(bp);
433                 }
434         }
435 }
436
/* Release all DMA-coherent and kernel memory owned by the device:
 * 5709 context pages, the combined status/statistics block, the TX
 * descriptor ring and its shadow buffer array, and all RX descriptor
 * rings with their (vmalloc'ed) shadow buffers.  Safe to call on a
 * partially allocated state — every pointer is checked and NULLed so
 * the function is idempotent (used as the error path of
 * bnx2_alloc_mem()).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		/* stats_blk points into the status_blk allocation. */
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
475
/* Allocate all per-device memory: TX shadow array and descriptor ring,
 * RX shadow array (vmalloc — it scales with rx_max_ring) and one DMA
 * descriptor ring per RX ring, the combined status+statistics block,
 * and on the 5709 the on-chip context backing pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc does not zero; clear the shadow array explicitly. */
	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The stats block lives cache-aligned after the status block
	 * inside the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context memory, split into
		 * BCM_PAGE_SIZE pages (at least one).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
546
/* Report the current link state to the bootcode via the shared-memory
 * BNX2_LINK_STATUS word: speed/duplex, autoneg status, and whether the
 * link came up through parallel detection.  Skipped entirely when the
 * PHY is managed remotely (REMOTE_PHY_CAP_FLAG).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get the
			 * current value.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
605
606 static char *
607 bnx2_xceiver_str(struct bnx2 *bp)
608 {
609         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
610                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
611                  "Copper"));
612 }
613
/* Log the link state, update the carrier flag, and forward the state
 * to the bootcode via bnx2_report_fw_link().  The printk calls without
 * a level continue the initial KERN_INFO line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Describe the negotiated flow-control directions. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
650
/* Resolve the flow-control settings (bp->flow_ctrl) after link-up.
 *
 * If pause autonegotiation is not fully enabled, the requested setting
 * is applied directly (full duplex only).  On a 5708 SerDes link the
 * result is read straight from the PHY's 1000X status register.
 * Otherwise the local and remote pause advertisements are compared per
 * the IEEE 802.3 resolution table; SerDes 1000X advertisement bits are
 * first translated to their copper (PAUSE_CAP/PAUSE_ASYM) equivalents
 * so one resolution path serves both media.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Map 1000X pause bits onto the copper equivalents. */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
726
727 static int
728 bnx2_5709s_linkup(struct bnx2 *bp)
729 {
730         u32 val, speed;
731
732         bp->link_up = 1;
733
734         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
735         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
736         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
737
738         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
739                 bp->line_speed = bp->req_line_speed;
740                 bp->duplex = bp->req_duplex;
741                 return 0;
742         }
743         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
744         switch (speed) {
745                 case MII_BNX2_GP_TOP_AN_SPEED_10:
746                         bp->line_speed = SPEED_10;
747                         break;
748                 case MII_BNX2_GP_TOP_AN_SPEED_100:
749                         bp->line_speed = SPEED_100;
750                         break;
751                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
752                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
753                         bp->line_speed = SPEED_1000;
754                         break;
755                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
756                         bp->line_speed = SPEED_2500;
757                         break;
758         }
759         if (val & MII_BNX2_GP_TOP_AN_FD)
760                 bp->duplex = DUPLEX_FULL;
761         else
762                 bp->duplex = DUPLEX_HALF;
763         return 0;
764 }
765
766 static int
767 bnx2_5708s_linkup(struct bnx2 *bp)
768 {
769         u32 val;
770
771         bp->link_up = 1;
772         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
773         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
774                 case BCM5708S_1000X_STAT1_SPEED_10:
775                         bp->line_speed = SPEED_10;
776                         break;
777                 case BCM5708S_1000X_STAT1_SPEED_100:
778                         bp->line_speed = SPEED_100;
779                         break;
780                 case BCM5708S_1000X_STAT1_SPEED_1G:
781                         bp->line_speed = SPEED_1000;
782                         break;
783                 case BCM5708S_1000X_STAT1_SPEED_2G5:
784                         bp->line_speed = SPEED_2500;
785                         break;
786         }
787         if (val & BCM5708S_1000X_STAT1_FD)
788                 bp->duplex = DUPLEX_FULL;
789         else
790                 bp->duplex = DUPLEX_HALF;
791
792         return 0;
793 }
794
795 static int
796 bnx2_5706s_linkup(struct bnx2 *bp)
797 {
798         u32 bmcr, local_adv, remote_adv, common;
799
800         bp->link_up = 1;
801         bp->line_speed = SPEED_1000;
802
803         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
804         if (bmcr & BMCR_FULLDPLX) {
805                 bp->duplex = DUPLEX_FULL;
806         }
807         else {
808                 bp->duplex = DUPLEX_HALF;
809         }
810
811         if (!(bmcr & BMCR_ANENABLE)) {
812                 return 0;
813         }
814
815         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
816         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
817
818         common = local_adv & remote_adv;
819         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
820
821                 if (common & ADVERTISE_1000XFULL) {
822                         bp->duplex = DUPLEX_FULL;
823                 }
824                 else {
825                         bp->duplex = DUPLEX_HALF;
826                 }
827         }
828
829         return 0;
830 }
831
/* Fill in bp->line_speed / bp->duplex after a copper PHY link-up,
 * either from the autoneg result or from the forced BMCR settings.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Autoneg: try 1000BASE-T first.  In MII_STAT1000 the
		 * link partner's 1000BASE-T ability bits sit two bit
		 * positions above our MII_CTRL1000 advertisement bits,
		 * hence the >> 2 to line the layouts up before ANDing.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit mode; fall back to the
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common with the partner. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: speed and duplex are forced in BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
897
/* Program the EMAC to match the resolved link state in bp: port mode
 * (MII/GMII/2.5G), duplex, rx/tx flow control, and TX slot timing.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths value; 1000/half needs a larger one.
	 * 0x2620 / 0x26ff appear to be IPG/slot-time encodings for
	 * BNX2_EMAC_TX_LENGTHS -- TODO confirm against chip docs.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M port mode; the 5706 falls
				 * through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
964
965 static void
966 bnx2_enable_bmsr1(struct bnx2 *bp)
967 {
968         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
969             (CHIP_NUM(bp) == CHIP_NUM_5709))
970                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
971                                MII_BNX2_BLK_ADDR_GP_STATUS);
972 }
973
974 static void
975 bnx2_disable_bmsr1(struct bnx2 *bp)
976 {
977         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
978             (CHIP_NUM(bp) == CHIP_NUM_5709))
979                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
980                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
981 }
982
983 static int
984 bnx2_test_and_enable_2g5(struct bnx2 *bp)
985 {
986         u32 up1;
987         int ret = 1;
988
989         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
990                 return 0;
991
992         if (bp->autoneg & AUTONEG_SPEED)
993                 bp->advertising |= ADVERTISED_2500baseX_Full;
994
995         if (CHIP_NUM(bp) == CHIP_NUM_5709)
996                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
997
998         bnx2_read_phy(bp, bp->mii_up1, &up1);
999         if (!(up1 & BCM5708S_UP1_2G5)) {
1000                 up1 |= BCM5708S_UP1_2G5;
1001                 bnx2_write_phy(bp, bp->mii_up1, up1);
1002                 ret = 0;
1003         }
1004
1005         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1006                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1007                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1008
1009         return ret;
1010 }
1011
1012 static int
1013 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1014 {
1015         u32 up1;
1016         int ret = 0;
1017
1018         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1019                 return 0;
1020
1021         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1022                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1023
1024         bnx2_read_phy(bp, bp->mii_up1, &up1);
1025         if (up1 & BCM5708S_UP1_2G5) {
1026                 up1 &= ~BCM5708S_UP1_2G5;
1027                 bnx2_write_phy(bp, bp->mii_up1, up1);
1028                 ret = 1;
1029         }
1030
1031         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1032                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1033                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1034
1035         return ret;
1036 }
1037
1038 static void
1039 bnx2_enable_forced_2g5(struct bnx2 *bp)
1040 {
1041         u32 bmcr;
1042
1043         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1044                 return;
1045
1046         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1047                 u32 val;
1048
1049                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1050                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1051                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1052                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1053                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1054                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1055
1056                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1057                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1058                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1059
1060         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1061                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1062                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1063         }
1064
1065         if (bp->autoneg & AUTONEG_SPEED) {
1066                 bmcr &= ~BMCR_ANENABLE;
1067                 if (bp->req_duplex == DUPLEX_FULL)
1068                         bmcr |= BMCR_FULLDPLX;
1069         }
1070         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1071 }
1072
1073 static void
1074 bnx2_disable_forced_2g5(struct bnx2 *bp)
1075 {
1076         u32 bmcr;
1077
1078         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079                 return;
1080
1081         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082                 u32 val;
1083
1084                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1086                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1088                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1089
1090                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1092                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1093
1094         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1095                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1096                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1097         }
1098
1099         if (bp->autoneg & AUTONEG_SPEED)
1100                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1101         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1102 }
1103
/* Poll the PHY, update bp's link state (link_up / line_speed / duplex /
 * flow_ctrl), report any change, and reprogram the MAC to match.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode just claim the link is up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY firmware owns the link; events arrive via
	 * bnx2_remote_phy_event() instead of being polled here.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* The BMSR link-status bit is latched-low; read it twice so
	 * the second read reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: trust the EMAC link status bit over
	 * the MII BMSR link bit.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G state so autoneg can
		 * start clean when the link returns.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1169
1170 static int
1171 bnx2_reset_phy(struct bnx2 *bp)
1172 {
1173         int i;
1174         u32 reg;
1175
1176         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1177
1178 #define PHY_RESET_MAX_WAIT 100
1179         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1180                 udelay(10);
1181
1182                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1183                 if (!(reg & BMCR_RESET)) {
1184                         udelay(20);
1185                         break;
1186                 }
1187         }
1188         if (i == PHY_RESET_MAX_WAIT) {
1189                 return -EBUSY;
1190         }
1191         return 0;
1192 }
1193
1194 static u32
1195 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1196 {
1197         u32 adv = 0;
1198
1199         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1200                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1201
1202                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1203                         adv = ADVERTISE_1000XPAUSE;
1204                 }
1205                 else {
1206                         adv = ADVERTISE_PAUSE_CAP;
1207                 }
1208         }
1209         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1210                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1211                         adv = ADVERTISE_1000XPSE_ASYM;
1212                 }
1213                 else {
1214                         adv = ADVERTISE_PAUSE_ASYM;
1215                 }
1216         }
1217         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1218                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1219                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1220                 }
1221                 else {
1222                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1223                 }
1224         }
1225         return adv;
1226 }
1227
1228 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1229
1230 static int
1231 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1232 {
1233         u32 speed_arg = 0, pause_adv;
1234
1235         pause_adv = bnx2_phy_get_pause_adv(bp);
1236
1237         if (bp->autoneg & AUTONEG_SPEED) {
1238                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1239                 if (bp->advertising & ADVERTISED_10baseT_Half)
1240                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1241                 if (bp->advertising & ADVERTISED_10baseT_Full)
1242                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1243                 if (bp->advertising & ADVERTISED_100baseT_Half)
1244                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1245                 if (bp->advertising & ADVERTISED_100baseT_Full)
1246                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1247                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1248                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1249                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1250                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1251         } else {
1252                 if (bp->req_line_speed == SPEED_2500)
1253                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1254                 else if (bp->req_line_speed == SPEED_1000)
1255                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1256                 else if (bp->req_line_speed == SPEED_100) {
1257                         if (bp->req_duplex == DUPLEX_FULL)
1258                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1259                         else
1260                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1261                 } else if (bp->req_line_speed == SPEED_10) {
1262                         if (bp->req_duplex == DUPLEX_FULL)
1263                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1264                         else
1265                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1266                 }
1267         }
1268
1269         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1270                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1271         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1272                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1273
1274         if (port == PORT_TP)
1275                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1276                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1277
1278         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1279
1280         spin_unlock_bh(&bp->phy_lock);
1281         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1282         spin_lock_bh(&bp->phy_lock);
1283
1284         return 0;
1285 }
1286
/* Apply the requested link configuration to a SerDes PHY: either force
 * a fixed speed/duplex or (re)start autonegotiation.  Delegates to the
 * firmware path for remote-PHY capable devices.  Called with phy_lock
 * held; the lock is dropped briefly around sleeps.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G mode requires a link bounce so the
		 * partner re-trains at the new rate.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear the 0x2000 speed bit (BMCR_SPEED100)
				 * -- part of the 5709 forced-speed encoding;
				 * TODO confirm against chip docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees the drop,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop phy_lock across it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Nothing changed; just re-resolve flow control. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1401
/* Advertisement masks used by the link-setup paths below.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands using a local variable named
 * `bp`, so it is only usable inside functions that declare one.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bit groups (ADVERTISE_* layout). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1416
1417 static void
1418 bnx2_set_default_remote_link(struct bnx2 *bp)
1419 {
1420         u32 link;
1421
1422         if (bp->phy_port == PORT_TP)
1423                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1424         else
1425                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1426
1427         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1428                 bp->req_line_speed = 0;
1429                 bp->autoneg |= AUTONEG_SPEED;
1430                 bp->advertising = ADVERTISED_Autoneg;
1431                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1432                         bp->advertising |= ADVERTISED_10baseT_Half;
1433                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1434                         bp->advertising |= ADVERTISED_10baseT_Full;
1435                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1436                         bp->advertising |= ADVERTISED_100baseT_Half;
1437                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1438                         bp->advertising |= ADVERTISED_100baseT_Full;
1439                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1440                         bp->advertising |= ADVERTISED_1000baseT_Full;
1441                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1442                         bp->advertising |= ADVERTISED_2500baseX_Full;
1443         } else {
1444                 bp->autoneg = 0;
1445                 bp->advertising = 0;
1446                 bp->req_duplex = DUPLEX_FULL;
1447                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1448                         bp->req_line_speed = SPEED_10;
1449                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1450                                 bp->req_duplex = DUPLEX_HALF;
1451                 }
1452                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1453                         bp->req_line_speed = SPEED_100;
1454                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1455                                 bp->req_duplex = DUPLEX_HALF;
1456                 }
1457                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1458                         bp->req_line_speed = SPEED_1000;
1459                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1460                         bp->req_line_speed = SPEED_2500;
1461         }
1462 }
1463
1464 static void
1465 bnx2_set_default_link(struct bnx2 *bp)
1466 {
1467         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1468                 return bnx2_set_default_remote_link(bp);
1469
1470         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1471         bp->req_line_speed = 0;
1472         if (bp->phy_flags & PHY_SERDES_FLAG) {
1473                 u32 reg;
1474
1475                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1476
1477                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1478                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1479                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1480                         bp->autoneg = 0;
1481                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1482                         bp->req_duplex = DUPLEX_FULL;
1483                 }
1484         } else
1485                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1486 }
1487
1488 static void
1489 bnx2_send_heart_beat(struct bnx2 *bp)
1490 {
1491         u32 msg;
1492         u32 addr;
1493
1494         spin_lock(&bp->indirect_lock);
1495         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1496         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1497         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1498         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1499         spin_unlock(&bp->indirect_lock);
1500 }
1501
/* Handle a link event from the remote-PHY firmware: decode the link
 * status word from shared memory into bp's link state, resolve flow
 * control and media type, then report and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware wants a fresh heartbeat. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex, then falls through
		 * to the matching *FULL case for the speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Flow control: forced unless both speed and flow
		 * control are autonegotiated.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may change (dual-media remote PHYs);
		 * reload defaults when it does.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1580
1581 static int
1582 bnx2_set_remote_link(struct bnx2 *bp)
1583 {
1584         u32 evt_code;
1585
1586         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1587         switch (evt_code) {
1588                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1589                         bnx2_remote_phy_event(bp);
1590                         break;
1591                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1592                 default:
1593                         bnx2_send_heart_beat(bp);
1594                         break;
1595         }
1596         return 0;
1597 }
1598
/* Program the copper PHY from the requested settings in *bp.
 *
 * Autoneg path: rebuild the 10/100 (MII_ADVERTISE) and 1000
 * (MII_CTRL1000) advertisement registers from bp->advertising plus the
 * pause bits, and restart autonegotiation only if something actually
 * changed or autoneg was disabled.  Forced path: compute a new BMCR for
 * the requested speed/duplex and, if the link is currently up, force it
 * down first so the partner renegotiates.
 *
 * NOTE(review): uses spin_unlock_bh/spin_lock_bh around msleep(), so the
 * caller is presumably expected to hold bp->phy_lock (taken _bh) —
 * verify against callers.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement so the comparison below is meaningful.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		/* Add pause advertisement derived from bp->req_flow_ctrl. */
		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite and restart autoneg only when something changed,
		 * to avoid needless link flaps.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex: build the BMCR we want. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1695
1696 static int
1697 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1698 {
1699         if (bp->loopback == MAC_LOOPBACK)
1700                 return 0;
1701
1702         if (bp->phy_flags & PHY_SERDES_FLAG) {
1703                 return (bnx2_setup_serdes_phy(bp, port));
1704         }
1705         else {
1706                 return (bnx2_setup_copper_phy(bp));
1707         }
1708 }
1709
/* One-time init of the 5709 SerDes PHY.
 *
 * The 5709S exposes the standard MII registers at an offset of 0x10 and
 * uses a block-address register (MII_BNX2_BLK_ADDR) to select register
 * banks, so each group of writes below is preceded by a bank select.
 * Configures fiber mode, 2.5G capability (per bp->phy_flags), next-page
 * exchange and CL73 autoneg parameters, then leaves the COMBO_IEEEB0
 * bank selected.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route subsequent MDIO accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G advertisement per device capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM autoneg parameters. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default IEEE combo bank selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1758
/* One-time init of the 5708 SerDes PHY.
 *
 * Resets the PHY, enables fiber mode with media auto-detect, enables
 * PLL early-lock detect, and (when capable) advertises 2.5G.  Early
 * chip revisions (A0/B0/B1) get a TX amplitude workaround, and
 * backplane boards may carry an NVRAM-supplied TX control value that is
 * programmed into BCM5708S_TX_ACTL3.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Select DIG3 bank, then back to DIG for the 1000X controls. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G where the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory (NVRAM). */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1816
/* One-time init of the 5706 SerDes PHY.
 *
 * Resets the PHY, clears the parallel-detect flag, and programs
 * undocumented shadow registers 0x18/0x1c with values that differ for
 * jumbo (mtu > 1500) versus standard frames.  The exact semantics of
 * these magic values are vendor-specified; only the "extended packet
 * length" bit is identified.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 on 5706 only;
	 * meaning not documented here — see Broadcom register spec.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard frames. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1853
/* One-time init of the copper PHY.
 *
 * Resets the PHY and applies hardware workarounds selected by
 * bp->phy_flags: a CRC-fix sequence of shadow-register writes, and a
 * DSP tweak that disables early DAC.  Then sets or clears the extended
 * packet length bit depending on MTU, and finally enables
 * ethernet@wirespeed (link at reduced speed on marginal cabling).
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Vendor-specified shadow register sequence to work around a
	 * CRC errata; values are opaque magic from Broadcom.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1904
1905
1906 static int
1907 bnx2_init_phy(struct bnx2 *bp)
1908 {
1909         u32 val;
1910         int rc = 0;
1911
1912         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1913         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1914
1915         bp->mii_bmcr = MII_BMCR;
1916         bp->mii_bmsr = MII_BMSR;
1917         bp->mii_bmsr1 = MII_BMSR;
1918         bp->mii_adv = MII_ADVERTISE;
1919         bp->mii_lpa = MII_LPA;
1920
1921         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1922
1923         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1924                 goto setup_phy;
1925
1926         bnx2_read_phy(bp, MII_PHYSID1, &val);
1927         bp->phy_id = val << 16;
1928         bnx2_read_phy(bp, MII_PHYSID2, &val);
1929         bp->phy_id |= val & 0xffff;
1930
1931         if (bp->phy_flags & PHY_SERDES_FLAG) {
1932                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1933                         rc = bnx2_init_5706s_phy(bp);
1934                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1935                         rc = bnx2_init_5708s_phy(bp);
1936                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1937                         rc = bnx2_init_5709s_phy(bp);
1938         }
1939         else {
1940                 rc = bnx2_init_copper_phy(bp);
1941         }
1942
1943 setup_phy:
1944         if (!rc)
1945                 rc = bnx2_setup_phy(bp, bp->phy_port);
1946
1947         return rc;
1948 }
1949
1950 static int
1951 bnx2_set_mac_loopback(struct bnx2 *bp)
1952 {
1953         u32 mac_mode;
1954
1955         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1956         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1957         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1958         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1959         bp->link_up = 1;
1960         return 0;
1961 }
1962
1963 static int bnx2_test_link(struct bnx2 *);
1964
/* Put the PHY into loopback at 1000/full and force the MAC into GMII
 * mode with the loop/force bits cleared.  Waits up to ~1s for
 * bnx2_test_link() to report link; proceeds regardless of the outcome.
 * Returns 0, or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* BMCR write needs the phy_lock; drop it before sleeping below. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up, max 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear MAC loopback/force bits and select GMII for PHY loopback. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1994
/* Post a message to the bootcode through the DRV_MB mailbox and wait
 * for it to be acknowledged.
 *
 * A rolling sequence number (bp->fw_wr_seq) is folded into msg_data so
 * the firmware's ack (echoed sequence in BNX2_FW_MB) can be matched.
 * Messages tagged BNX2_DRV_MSG_DATA_WAIT0 do not require an ack and
 * return 0 after the poll loop regardless.
 *
 * Returns 0 on success, -EBUSY if the ack never arrived within
 * FW_ACK_TIME_OUT_MS (also notifying the firmware of the timeout), or
 * -EIO if the firmware acked with a non-OK status.  Sleeps; must be
 * called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Ack matches when the firmware echoes our sequence. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2037
/* Initialize the 5709 context memory.
 *
 * Kicks off the hardware MEM_INIT and polls (up to 10 x 2us) for it to
 * complete, then programs the host page table with the DMA address of
 * each context block, polling for each WRITE_REQ to be consumed.
 * Returns 0 on success or -EBUSY if the hardware did not complete an
 * operation in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): (1 << 12) is an undocumented command bit here;
	 * confirm against the chip spec before changing.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address plus valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2080
/* Zero out all 96 on-chip connection contexts (pre-5709 chips).
 *
 * For each virtual CID, maps the context window to the corresponding
 * physical CID and writes zeroes over the whole context.  5706 A0
 * parts need a vcid -> pcid remap for certain IDs (bit 3 set) to work
 * around a chip errata; other chips use the identity mapping.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 errata: remap vcids with bit 3 set into the
			 * 0x60+ range, preserving the other bits.
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans CTX_SIZE bytes in PHY_CTX_SIZE pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			/* Establish the final virtual -> physical mapping. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2126
/* Work around bad RX buffer memory blocks in the chip's mbuf pool.
 *
 * Drains the entire free pool by allocating every mbuf, remembering the
 * good ones (bit 9 clear) and implicitly discarding the bad ones, then
 * frees only the good mbufs back to the pool.  Returns 0 on success or
 * -ENOMEM if the scratch array cannot be allocated.
 *
 * NOTE(review): the scratch array holds 512 entries with no explicit
 * bound check on good_mbuf_cnt; this assumes the hardware pool never
 * yields more than 512 mbufs — confirm against the chip spec.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf id in the format the FREE register
		 * expects (id in both fields plus the valid bit).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2177
2178 static void
2179 bnx2_set_mac_addr(struct bnx2 *bp)
2180 {
2181         u32 val;
2182         u8 *mac_addr = bp->dev->dev_addr;
2183
2184         val = (mac_addr[0] << 8) | mac_addr[1];
2185
2186         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2187
2188         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2189                 (mac_addr[4] << 8) | mac_addr[5];
2190
2191         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2192 }
2193
/* Allocate and DMA-map a fresh receive skb for RX ring slot 'index',
 * publishing its bus address in the corresponding rx_bd and advancing
 * bp->rx_prod_bseq.  Returns 0 or -ENOMEM if skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Remember skb and mapping for completion/unmap later. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hand the buffer's bus address to the chip. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2224
2225 static int
2226 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2227 {
2228         struct status_block *sblk = bp->status_blk;
2229         u32 new_link_state, old_link_state;
2230         int is_set = 1;
2231
2232         new_link_state = sblk->status_attn_bits & event;
2233         old_link_state = sblk->status_attn_bits_ack & event;
2234         if (new_link_state != old_link_state) {
2235                 if (new_link_state)
2236                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2237                 else
2238                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2239         } else
2240                 is_set = 0;
2241
2242         return is_set;
2243 }
2244
2245 static void
2246 bnx2_phy_int(struct bnx2 *bp)
2247 {
2248         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2249                 spin_lock(&bp->phy_lock);
2250                 bnx2_set_link(bp);
2251                 spin_unlock(&bp->phy_lock);
2252         }
2253         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2254                 bnx2_set_remote_link(bp);
2255
2256 }
2257
/* Reclaim completed TX descriptors.
 *
 * Walks the software consumer index up to the hardware consumer index
 * from the status block, unmapping and freeing each completed skb and
 * its fragments.  For TSO packets, bails out if the chip has only
 * partially completed the packet's BDs.  Wakes the TX queue when enough
 * descriptors have been freed (with an smp_mb() ordering the tx_cons
 * update against the queue-stopped check).
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The chip never reports a consumer index on the last BD of a
	 * ring page (the next-pointer BD); skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Not all BDs of this packet are done yet; stop. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read hw_cons: more packets may have completed. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to close the race with
		 * bnx2_start_xmit() stopping the queue concurrently.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2345
/* Recycle an RX skb: move the buffer (skb, DMA mapping, and BD
 * address) from ring slot 'cons' to ring slot 'prod' so the chip can
 * fill it again, and advance rx_prod_bseq.  When cons == prod the
 * buffer is already in place and only the bookkeeping is updated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (partially synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	/* Carry the DMA mapping and BD bus address over to the new slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2375
/* Process up to 'budget' received packets (NAPI poll body).
 *
 * Walks the RX ring from the software consumer to the hardware consumer
 * index in the status block.  For each packet: drops frames with
 * l2_fhdr errors (recycling the buffer), copies small packets into a
 * fresh skb when jumbo MTU is in use, otherwise unmaps the full buffer
 * and replenishes the slot with a new skb.  Applies VLAN acceleration
 * and hardware checksum results before handing the skb to the stack.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the next-pointer BD at the end of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area needed to inspect/copy. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the packet data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the trailing 4 bytes (frame CRC). */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated; hand this buffer up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle in place. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Propagate the hardware TCP/UDP checksum result. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return rx_pkt;

}
2523
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further interrupts until the NAPI
	 * poll re-enables them.  MSI is never shared, so there is no
	 * "is this ours?" check as in bnx2_interrupt().
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2546
/* One-shot MSI ISR.  Unlike bnx2_msi(), no ack/mask register write is
 * done here; presumably the host coalescing block is configured for
 * one-shot MSI so the chip disarms itself — NOTE(review): confirm
 * against the HC setup code.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2563
/* INTx (possibly shared) ISR.  Detects "not our interrupt", acks and
 * masks the device, then hands off to NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* shared line, not our interrupt */

	/* Ack and mask further interrupts until polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing before scheduling
	 * the poll, so a later index change re-raises the interrupt.
	 */
	if (netif_rx_schedule_prep(dev)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev);
	}

	return IRQ_HANDLED;
}
2602
2603 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2604                                  STATUS_ATTN_BITS_TIMER_ABORT)
2605
2606 static inline int
2607 bnx2_has_work(struct bnx2 *bp)
2608 {
2609         struct status_block *sblk = bp->status_blk;
2610
2611         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2612             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2613                 return 1;
2614
2615         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2616             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2617                 return 1;
2618
2619         return 0;
2620 }
2621
/* NAPI poll routine (old netdev-based NAPI interface): handle
 * attention events, reap TX completions, process up to *budget RX
 * packets, and re-enable interrupts when no work remains.
 * Returns 0 when done polling, 1 to remain on the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index before the final work check so that
	 * work posted after this point raises a new interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: write the index first with MASK_INT still set,
		 * then again without it to unmask the interrupt.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2682
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	/* phy_lock serializes these register updates. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; re-add them below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and
	 * ASF is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* All-multicast: set every bit of the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address (low byte of the little-endian
		 * CRC) to one bit of the multicast filter registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2757
2758 #define FW_BUF_SIZE     0x8000
2759
2760 static int
2761 bnx2_gunzip_init(struct bnx2 *bp)
2762 {
2763         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2764                 goto gunzip_nomem1;
2765
2766         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2767                 goto gunzip_nomem2;
2768
2769         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2770         if (bp->strm->workspace == NULL)
2771                 goto gunzip_nomem3;
2772
2773         return 0;
2774
2775 gunzip_nomem3:
2776         kfree(bp->strm);
2777         bp->strm = NULL;
2778
2779 gunzip_nomem2:
2780         vfree(bp->gunzip_buf);
2781         bp->gunzip_buf = NULL;
2782
2783 gunzip_nomem1:
2784         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2785                             "uncompression.\n", bp->dev->name);
2786         return -ENOMEM;
2787 }
2788
2789 static void
2790 bnx2_gunzip_end(struct bnx2 *bp)
2791 {
2792         kfree(bp->strm->workspace);
2793
2794         kfree(bp->strm);
2795         bp->strm = NULL;
2796
2797         if (bp->gunzip_buf) {
2798                 vfree(bp->gunzip_buf);
2799                 bp->gunzip_buf = NULL;
2800         }
2801 }
2802
/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf.
 * @zbuf, @len: gzip data (10-byte header, optional name, raw deflate).
 * @outbuf, @outlen: set to the decompressed data and its length.
 * Returns 0 on success, -EINVAL for a bad header, or a zlib error.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* The fixed part of the gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* Skip the NUL-terminated original-file-name field, if present
	 * (other optional gzip fields are not expected in our images).
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate stream, no zlib header. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2843
/* Download microcode into one of the two RV2P processors and reset
 * it; the processor is un-stalled later during chip initialization.
 * @rv2p_code_len is in bytes; each instruction is 8 bytes.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* Stage the 64-bit instruction as HIGH then LOW words. */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the staged pair at instruction index i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2876
2877 static int
2878 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2879 {
2880         u32 offset;
2881         u32 val;
2882         int rc;
2883
2884         /* Halt the CPU. */
2885         val = REG_RD_IND(bp, cpu_reg->mode);
2886         val |= cpu_reg->mode_value_halt;
2887         REG_WR_IND(bp, cpu_reg->mode, val);
2888         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2889
2890         /* Load the Text area. */
2891         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2892         if (fw->gz_text) {
2893                 u32 text_len;
2894                 void *text;
2895
2896                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2897                                  &text_len);
2898                 if (rc)
2899                         return rc;
2900
2901                 fw->text = text;
2902         }
2903         if (fw->gz_text) {
2904                 int j;
2905
2906                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2907                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2908                 }
2909         }
2910
2911         /* Load the Data area. */
2912         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2913         if (fw->data) {
2914                 int j;
2915
2916                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2917                         REG_WR_IND(bp, offset, fw->data[j]);
2918                 }
2919         }
2920
2921         /* Load the SBSS area. */
2922         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2923         if (fw->sbss) {
2924                 int j;
2925
2926                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2927                         REG_WR_IND(bp, offset, fw->sbss[j]);
2928                 }
2929         }
2930
2931         /* Load the BSS area. */
2932         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2933         if (fw->bss) {
2934                 int j;
2935
2936                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2937                         REG_WR_IND(bp, offset, fw->bss[j]);
2938                 }
2939         }
2940
2941         /* Load the Read-Only area. */
2942         offset = cpu_reg->spad_base +
2943                 (fw->rodata_addr - cpu_reg->mips_view_base);
2944         if (fw->rodata) {
2945                 int j;
2946
2947                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2948                         REG_WR_IND(bp, offset, fw->rodata[j]);
2949                 }
2950         }
2951
2952         /* Clear the pre-fetch instruction. */
2953         REG_WR_IND(bp, cpu_reg->inst, 0);
2954         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2955
2956         /* Start the CPU. */
2957         val = REG_RD_IND(bp, cpu_reg->mode);
2958         val &= ~cpu_reg->mode_value_halt;
2959         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2960         REG_WR_IND(bp, cpu_reg->mode, val);
2961
2962         return 0;
2963 }
2964
/* Download firmware to all on-chip processors: both RV2P engines and
 * the RX, TX, TX patch-up, completion, and (5709 only) command
 * processors.  Selects the _09 or _06 firmware set by chip number.
 * Returns 0 on success or a negative error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the zlib stream and buffers used below. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 firmware only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3109
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; for D3hot, the MAC is optionally configured for
 * Wake-on-LAN (magic/ACPI packet) before power-down.
 * Returns 0 on success or -EINVAL for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits and acknowledge any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WOL MAC configuration set up for D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link at 10/100
			 * for wake-up, preserving the user settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Program D3hot (state value 3), except on 5706 A0/A1
		 * without WOL, where D3hot is skipped.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3236
3237 static int
3238 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3239 {
3240         u32 val;
3241         int j;
3242
3243         /* Request access to the flash interface. */
3244         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3245         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3246                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3247                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3248                         break;
3249
3250                 udelay(5);
3251         }
3252
3253         if (j >= NVRAM_TIMEOUT_COUNT)
3254                 return -EBUSY;
3255
3256         return 0;
3257 }
3258
3259 static int
3260 bnx2_release_nvram_lock(struct bnx2 *bp)
3261 {
3262         int j;
3263         u32 val;
3264
3265         /* Relinquish nvram interface. */
3266         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3267
3268         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3269                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3270                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3271                         break;
3272
3273                 udelay(5);
3274         }
3275
3276         if (j >= NVRAM_TIMEOUT_COUNT)
3277                 return -EBUSY;
3278
3279         return 0;
3280 }
3281
3282
3283 static int
3284 bnx2_enable_nvram_write(struct bnx2 *bp)
3285 {
3286         u32 val;
3287
3288         val = REG_RD(bp, BNX2_MISC_CFG);
3289         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3290
3291         if (!bp->flash_info->buffered) {
3292                 int j;
3293
3294                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3295                 REG_WR(bp, BNX2_NVM_COMMAND,
3296                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3297
3298                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3299                         udelay(5);
3300
3301                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3302                         if (val & BNX2_NVM_COMMAND_DONE)
3303                                 break;
3304                 }
3305
3306                 if (j >= NVRAM_TIMEOUT_COUNT)
3307                         return -EBUSY;
3308         }
3309         return 0;
3310 }
3311
3312 static void
3313 bnx2_disable_nvram_write(struct bnx2 *bp)
3314 {
3315         u32 val;
3316
3317         val = REG_RD(bp, BNX2_MISC_CFG);
3318         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3319 }
3320
3321
3322 static void
3323 bnx2_enable_nvram_access(struct bnx2 *bp)
3324 {
3325         u32 val;
3326
3327         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3328         /* Enable both bits, even on read. */
3329         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3330                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3331 }
3332
3333 static void
3334 bnx2_disable_nvram_access(struct bnx2 *bp)
3335 {
3336         u32 val;
3337
3338         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3339         /* Disable both bits, even after read. */
3340         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3341                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3342                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3343 }
3344
3345 static int
3346 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3347 {
3348         u32 cmd;
3349         int j;
3350
3351         if (bp->flash_info->buffered)
3352                 /* Buffered flash, no erase needed */
3353                 return 0;
3354
3355         /* Build an erase command */
3356         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3357               BNX2_NVM_COMMAND_DOIT;
3358
3359         /* Need to clear DONE bit separately. */
3360         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3361
3362         /* Address of the NVRAM to read from. */
3363         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3364
3365         /* Issue an erase command. */
3366         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3367
3368         /* Wait for completion. */
3369         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3370                 u32 val;
3371
3372                 udelay(5);
3373
3374                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3375                 if (val & BNX2_NVM_COMMAND_DONE)
3376                         break;
3377         }
3378
3379         if (j >= NVRAM_TIMEOUT_COUNT)
3380                 return -EBUSY;
3381
3382         return 0;
3383 }
3384
3385 static int
3386 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3387 {
3388         u32 cmd;
3389         int j;
3390
3391         /* Build the command word. */
3392         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3393
3394         /* Calculate an offset of a buffered flash. */
3395         if (bp->flash_info->buffered) {
3396                 offset = ((offset / bp->flash_info->page_size) <<
3397                            bp->flash_info->page_bits) +
3398                           (offset % bp->flash_info->page_size);
3399         }
3400
3401         /* Need to clear DONE bit separately. */
3402         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3403
3404         /* Address of the NVRAM to read from. */
3405         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3406
3407         /* Issue a read command. */
3408         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3409
3410         /* Wait for completion. */
3411         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3412                 u32 val;
3413
3414                 udelay(5);
3415
3416                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3417                 if (val & BNX2_NVM_COMMAND_DONE) {
3418                         val = REG_RD(bp, BNX2_NVM_READ);
3419
3420                         val = be32_to_cpu(val);
3421                         memcpy(ret_val, &val, 4);
3422                         break;
3423                 }
3424         }
3425         if (j >= NVRAM_TIMEOUT_COUNT)
3426                 return -EBUSY;
3427
3428         return 0;
3429 }
3430
3431
3432 static int
3433 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3434 {
3435         u32 cmd, val32;
3436         int j;
3437
3438         /* Build the command word. */
3439         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3440
3441         /* Calculate an offset of a buffered flash. */
3442         if (bp->flash_info->buffered) {
3443                 offset = ((offset / bp->flash_info->page_size) <<
3444                           bp->flash_info->page_bits) +
3445                          (offset % bp->flash_info->page_size);
3446         }
3447
3448         /* Need to clear DONE bit separately. */
3449         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3450
3451         memcpy(&val32, val, 4);
3452         val32 = cpu_to_be32(val32);
3453
3454         /* Write the data. */
3455         REG_WR(bp, BNX2_NVM_WRITE, val32);
3456
3457         /* Address of the NVRAM to write to. */
3458         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3459
3460         /* Issue the write command. */
3461         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3462
3463         /* Wait for completion. */
3464         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3465                 udelay(5);
3466
3467                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3468                         break;
3469         }
3470         if (j >= NVRAM_TIMEOUT_COUNT)
3471                 return -EBUSY;
3472
3473         return 0;
3474 }
3475
/* Identify the attached flash/EEPROM part and record its parameters.
 *
 * Reads the NVM_CFG1 strapping register and matches it against
 * flash_table[].  If the interface has already been reconfigured
 * (bit 30 of NVM_CFG1 set) the match is done on config1's backup
 * strap bits only; otherwise the matching entry's config words are
 * programmed into the NVM_CFG1-3/WRITE1 registers under the NVRAM
 * hardware lock.  The flash size is taken from shared memory when
 * advertised there, else from the matched table entry.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Compare only the backup strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop falling through leaves j == entry_count. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size advertised in shared memory; fall back to
	 * the total size from the matched flash table entry. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3553
3554 static int
3555 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3556                 int buf_size)
3557 {
3558         int rc = 0;
3559         u32 cmd_flags, offset32, len32, extra;
3560
3561         if (buf_size == 0)
3562                 return 0;
3563
3564         /* Request access to the flash interface. */
3565         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3566                 return rc;
3567
3568         /* Enable access to flash interface */
3569         bnx2_enable_nvram_access(bp);
3570
3571         len32 = buf_size;
3572         offset32 = offset;
3573         extra = 0;
3574
3575         cmd_flags = 0;
3576
3577         if (offset32 & 3) {
3578                 u8 buf[4];
3579                 u32 pre_len;
3580
3581                 offset32 &= ~3;
3582                 pre_len = 4 - (offset & 3);
3583
3584                 if (pre_len >= len32) {
3585                         pre_len = len32;
3586                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3587                                     BNX2_NVM_COMMAND_LAST;
3588                 }
3589                 else {
3590                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3591                 }
3592
3593                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3594
3595                 if (rc)
3596                         return rc;
3597
3598                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3599
3600                 offset32 += 4;
3601                 ret_buf += pre_len;
3602                 len32 -= pre_len;
3603         }
3604         if (len32 & 3) {
3605                 extra = 4 - (len32 & 3);
3606                 len32 = (len32 + 4) & ~3;
3607         }
3608
3609         if (len32 == 4) {
3610                 u8 buf[4];
3611
3612                 if (cmd_flags)
3613                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3614                 else
3615                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3616                                     BNX2_NVM_COMMAND_LAST;
3617
3618                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3619
3620                 memcpy(ret_buf, buf, 4 - extra);
3621         }
3622         else if (len32 > 0) {
3623                 u8 buf[4];
3624
3625                 /* Read the first word. */
3626                 if (cmd_flags)
3627                         cmd_flags = 0;
3628                 else
3629                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3630
3631                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3632
3633                 /* Advance to the next dword. */
3634                 offset32 += 4;
3635                 ret_buf += 4;
3636                 len32 -= 4;
3637
3638                 while (len32 > 4 && rc == 0) {
3639                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3640
3641                         /* Advance to the next dword. */
3642                         offset32 += 4;
3643                         ret_buf += 4;
3644                         len32 -= 4;
3645                 }
3646
3647                 if (rc)
3648                         return rc;
3649
3650                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3651                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3652
3653                 memcpy(ret_buf, buf, 4 - extra);
3654         }
3655
3656         /* Disable access to flash interface */
3657         bnx2_disable_nvram_access(bp);
3658
3659         bnx2_release_nvram_lock(bp);
3660
3661         return rc;
3662 }
3663
/* Write @buf_size bytes from @data_buf to NVRAM starting at byte
 * @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write style: the
 * surrounding dwords are read first and merged with the caller's data
 * into a dword-aligned bounce buffer (align_buf).  The write then
 * proceeds one flash page per loop iteration; for non-buffered flash
 * each page is read into flash_buffer, erased, and rewritten with the
 * untouched head/tail of the page preserved.  The NVRAM hardware lock
 * is taken and dropped around each page.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths inside the page loop (e.g. a failed
 * read/write dword) jump straight to nvram_write_end without calling
 * bnx2_disable_nvram_access()/bnx2_release_nvram_lock(), so a
 * mid-page failure appears to leak the NVRAM lock — confirm and fix
 * separately.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword containing the first bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the dword containing the last bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge caller data with the preserved head/tail bytes into a
	 * dword-aligned scratch buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized staging buffer for the
	 * read-erase-rewrite cycle (264 bytes covers the page). */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* One flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3843
3844 static void
3845 bnx2_init_remote_phy(struct bnx2 *bp)
3846 {
3847         u32 val;
3848
3849         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3850         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3851                 return;
3852
3853         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3854         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3855                 return;
3856
3857         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3858                 if (netif_running(bp->dev)) {
3859                         val = BNX2_DRV_ACK_CAP_SIGNATURE |
3860                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3861                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3862                                    val);
3863                 }
3864                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3865
3866                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3867                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3868                         bp->phy_port = PORT_FIBRE;
3869                 else
3870                         bp->phy_port = PORT_TP;
3871         }
3872 }
3873
3874 static int
3875 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3876 {
3877         u32 val;
3878         int i, rc = 0;
3879
3880         /* Wait for the current PCI transaction to complete before
3881          * issuing a reset. */
3882         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3883                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3884                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3885                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3886                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3887         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3888         udelay(5);
3889
3890         /* Wait for the firmware to tell us it is ok to issue a reset. */
3891         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3892
3893         /* Deposit a driver reset signature so the firmware knows that
3894          * this is a soft reset. */
3895         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3896                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3897
3898         /* Do a dummy read to force the chip to complete all current transaction
3899          * before we issue a reset. */
3900         val = REG_RD(bp, BNX2_MISC_ID);
3901
3902         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3903                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3904                 REG_RD(bp, BNX2_MISC_COMMAND);
3905                 udelay(5);
3906
3907                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3908                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3909
3910                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3911
3912         } else {
3913                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3914                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3915                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3916
3917                 /* Chip reset. */
3918                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3919
3920                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3921                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3922                         current->state = TASK_UNINTERRUPTIBLE;
3923                         schedule_timeout(HZ / 50);
3924                 }
3925
3926                 /* Reset takes approximate 30 usec */
3927                 for (i = 0; i < 10; i++) {
3928                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3929                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3930                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3931                                 break;
3932                         udelay(10);
3933                 }
3934
3935                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3936                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3937                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3938                         return -EBUSY;
3939                 }
3940         }
3941
3942         /* Make sure byte swapping is properly configured. */
3943         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3944         if (val != 0x01020304) {
3945                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3946                 return -ENODEV;
3947         }
3948
3949         /* Wait for the firmware to finish its initialization. */
3950         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3951         if (rc)
3952                 return rc;
3953
3954         spin_lock_bh(&bp->phy_lock);
3955         bnx2_init_remote_phy(bp);
3956         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3957                 bnx2_set_default_remote_link(bp);
3958         spin_unlock_bh(&bp->phy_lock);
3959
3960         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3961                 /* Adjust the voltage regular to two steps lower.  The default
3962                  * of this register is 0x0000000e. */
3963                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3964
3965                 /* Remove bad rbuf memory from the free pool. */
3966                 rc = bnx2_alloc_bad_rbuf(bp);
3967         }
3968
3969         return rc;
3970 }
3971
/* Bring the chip from post-reset state to operational: program DMA
 * and byte-swap configuration, initialize contexts and on-chip CPUs,
 * set the MAC address, configure the host coalescing block, MTU, and
 * RX filter, then tell the firmware initialization is done and
 * enable the remaining blocks.  Register writes here are order
 * dependent.  Returns 0 on success or a negative errno from context,
 * CPU, or firmware-sync initialization.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine: byte/word swap modes plus read/write channel
	 * counts in bits 12 and 16. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single-DMA mode on TDMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, turn off relaxed ordering in the PCI-X command
	 * register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context/CPU init. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: set kernel bypass block size; 5709 A0/A1 also
	 * get halt disabled. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host coalescing: DMA addresses of the status and statistics
	 * blocks, split into low/high 32 bits. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters: "during interrupt" values
	 * in the upper half-word, normal values in the lower. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to post the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4153
/* Program the TX L2 context for connection ID @cid: context type,
 * command type, and the 64-bit host address of the TX descriptor ring.
 * The 5709 (Xinan) uses a different set of context offsets than the
 * earlier 5706/5708 chips; everything else is identical.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, offset0, offset1, offset2, offset3;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                offset0 = BNX2_L2CTX_TYPE_XI;
                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
        } else {
                offset0 = BNX2_L2CTX_TYPE;
                offset1 = BNX2_L2CTX_CMD_TYPE;
                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
        }
        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
        CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

        /* NOTE(review): the (8 << 16) field in the command type looks
         * like a bd count/size encoding - confirm against chip docs.
         */
        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
        CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

        /* Split the DMA address of the TX ring into high/low words. */
        val = (u64) bp->tx_desc_mapping >> 32;
        CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

        val = (u64) bp->tx_desc_mapping & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
4182
/* Reset the software TX ring state and hand the ring to the hardware.
 * The last descriptor slot (index MAX_TX_DESC_CNT) is used as a chain
 * bd that points the ring back at its own base address.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 cid;

        /* Wake the netif queue once half the ring has drained. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        /* Chain bd: loop the (single-page) ring back to its start. */
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        /* Producer/consumer bookkeeping starts from a clean slate. */
        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        cid = TX_CID;
        /* Cache the mailbox addresses used to ring the TX doorbell. */
        bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
        bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

        bnx2_init_tx_context(bp, cid);
}
4207
/* Initialize the RX descriptor ring(s): size the receive buffers from
 * the current MTU, chain the ring pages together, program the RX L2
 * context, pre-fill the ring with skbs and ring the doorbell.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                /* Fill every usable bd on this page with the buffer
                 * length and start/end flags.
                 */
                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* Last bd of each page chains to the next page; the
                 * final page chains back to page 0, closing the ring.
                 */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Program the RX context: bd-chain type and L2 size.
         * NOTE(review): the 0x02 << 8 field is presumably a bd
         * pre-read count - confirm against chip documentation.
         */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        /* Point the hardware at the first RX ring page. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the ring with skbs; stop early (without error) if
         * allocation fails - the ring simply runs with fewer buffers.
         */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Ring the RX doorbell: producer index, then byte sequence. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4267
4268 static void
4269 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4270 {
4271         u32 num_rings, max;
4272
4273         bp->rx_ring_size = size;
4274         num_rings = 1;
4275         while (size > MAX_RX_DESC_CNT) {
4276                 size -= MAX_RX_DESC_CNT;
4277                 num_rings++;
4278         }
4279         /* round to next power of 2 */
4280         max = MAX_RX_RINGS;
4281         while ((max & num_rings) == 0)
4282                 max >>= 1;
4283
4284         if (num_rings != max)
4285                 max <<= 1;
4286
4287         bp->rx_max_ring = max;
4288         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4289 }
4290
/* Unmap and free every skb still owned by the TX ring.  Called when the
 * ring is being torn down; assumes the TX path is already quiesced, so
 * no locking is done here.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                /* The first bd of a packet maps the linear header. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                /* The following nr_frags bds map the page fragments. */
                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                /* Skip past the header bd and all frag bds just freed. */
                i += j + 1;
        }

}
4327
4328 static void
4329 bnx2_free_rx_skbs(struct bnx2 *bp)
4330 {
4331         int i;
4332
4333         if (bp->rx_buf_ring == NULL)
4334                 return;
4335
4336         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4337                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4338                 struct sk_buff *skb = rx_buf->skb;
4339
4340                 if (skb == NULL)
4341                         continue;
4342
4343                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4344                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4345
4346                 rx_buf->skb = NULL;
4347
4348                 dev_kfree_skb(skb);
4349         }
4350 }
4351
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4358
4359 static int
4360 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4361 {
4362         int rc;
4363
4364         rc = bnx2_reset_chip(bp, reset_code);
4365         bnx2_free_skbs(bp);
4366         if (rc)
4367                 return rc;
4368
4369         if ((rc = bnx2_init_chip(bp)) != 0)
4370                 return rc;
4371
4372         bnx2_init_tx_ring(bp);
4373         bnx2_init_rx_ring(bp);
4374         return 0;
4375 }
4376
4377 static int
4378 bnx2_init_nic(struct bnx2 *bp)
4379 {
4380         int rc;
4381
4382         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4383                 return rc;
4384
4385         spin_lock_bh(&bp->phy_lock);
4386         bnx2_init_phy(bp);
4387         bnx2_set_link(bp);
4388         spin_unlock_bh(&bp->phy_lock);
4389         return 0;
4390 }
4391
/* Register self-test (ethtool).
 *
 * For each table entry: bits in rw_mask must be writable (take both 0
 * and 1), bits in ro_mask must keep their original value across any
 * write.  Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 * The original register value is restored on every path.
 *
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* Sentinel: offset 0xffff terminates the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                /* Save the original value so it can be restored. */
                save_val = readl(bp->regview + offset);

                /* Write all-zeros: rw bits must read back 0, ro bits
                 * must be unchanged.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: rw bits must read back 1, ro bits
                 * must still be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the register before reporting failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
4562
4563 static int
4564 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4565 {
4566         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4567                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4568         int i;
4569
4570         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4571                 u32 offset;
4572
4573                 for (offset = 0; offset < size; offset += 4) {
4574
4575                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4576
4577                         if (REG_RD_IND(bp, start + offset) !=
4578                                 test_pattern[i]) {
4579                                 return -ENODEV;
4580                         }
4581                 }
4582         }
4583         return 0;
4584 }
4585
4586 static int
4587 bnx2_test_memory(struct bnx2 *bp)
4588 {
4589         int ret = 0;
4590         int i;
4591         static struct mem_entry {
4592                 u32   offset;
4593                 u32   len;
4594         } mem_tbl_5706[] = {
4595                 { 0x60000,  0x4000 },
4596                 { 0xa0000,  0x3000 },
4597                 { 0xe0000,  0x4000 },
4598                 { 0x120000, 0x4000 },
4599                 { 0x1a0000, 0x4000 },
4600                 { 0x160000, 0x4000 },
4601                 { 0xffffffff, 0    },
4602         },
4603         mem_tbl_5709[] = {
4604                 { 0x60000,  0x4000 },
4605                 { 0xa0000,  0x3000 },
4606                 { 0xe0000,  0x4000 },
4607                 { 0x120000, 0x4000 },
4608                 { 0x1a0000, 0x4000 },
4609                 { 0xffffffff, 0    },
4610         };
4611         struct mem_entry *mem_tbl;
4612
4613         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4614                 mem_tbl = mem_tbl_5709;
4615         else
4616                 mem_tbl = mem_tbl_5706;
4617
4618         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4619                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4620                         mem_tbl[i].len)) != 0) {
4621                         return ret;
4622                 }
4623         }
4624
4625         return ret;
4626 }
4627
4628 #define BNX2_MAC_LOOPBACK       0
4629 #define BNX2_PHY_LOOPBACK       1
4630
/* Run one loopback self-test iteration in the given mode (MAC or PHY
 * loopback).  Builds a 1514-byte test frame addressed to our own MAC,
 * sends it through the TX ring, forces interrupt coalescing so the
 * status block updates, and verifies the frame came back intact.
 *
 * Returns 0 on success, -ENODEV if the frame is lost or corrupted,
 * -EINVAL for an unknown mode, -ENOMEM if the skb cannot be allocated.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Test frame: our MAC as destination, zeroed src/ethertype,
         * then an incrementing byte pattern for payload verification.
         */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalesce (without an interrupt) so the status block
         * reflects the current RX consumer index before we transmit.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Build a single start+end TX bd for the whole frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell, then coalesce again and give the chip
         * time to loop the frame back.
         */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* TX must have been fully consumed ... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ... and exactly num_pkts frames must have been received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        /* NOTE(review): rx_start_idx is used unmasked here; this
         * assumes the index is within the first ring page, which holds
         * right after the reset done by the caller - confirm.
         */
        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The l2_fhdr sits at the head of the RX buffer; skb_reserve
         * then advances ->data past it (rx_offset) to the frame.
         */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Any receive error in the frame header fails the test. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: hardware length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload byte pattern survived the round trip. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4749
4750 #define BNX2_MAC_LOOPBACK_FAILED        1
4751 #define BNX2_PHY_LOOPBACK_FAILED        2
4752 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4753                                          BNX2_PHY_LOOPBACK_FAILED)
4754
4755 static int
4756 bnx2_test_loopback(struct bnx2 *bp)
4757 {
4758         int rc = 0;
4759
4760         if (!netif_running(bp->dev))
4761                 return BNX2_LOOPBACK_FAILED;
4762
4763         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4764         spin_lock_bh(&bp->phy_lock);
4765         bnx2_init_phy(bp);
4766         spin_unlock_bh(&bp->phy_lock);
4767         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4768                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4769         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4770                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4771         return rc;
4772 }
4773
4774 #define NVRAM_SIZE 0x200
4775 #define CRC32_RESIDUAL 0xdebb20e3
4776
4777 static int
4778 bnx2_test_nvram(struct bnx2 *bp)
4779 {
4780         u32 buf[NVRAM_SIZE / 4];
4781         u8 *data = (u8 *) buf;
4782         int rc = 0;
4783         u32 magic, csum;
4784
4785         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4786                 goto test_nvram_done;
4787
4788         magic = be32_to_cpu(buf[0]);
4789         if (magic != 0x669955aa) {
4790                 rc = -ENODEV;
4791                 goto test_nvram_done;
4792         }
4793
4794         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4795                 goto test_nvram_done;
4796
4797         csum = ether_crc_le(0x100, data);
4798         if (csum != CRC32_RESIDUAL) {
4799                 rc = -ENODEV;
4800                 goto test_nvram_done;
4801         }
4802
4803         csum = ether_crc_le(0x100, data + 0x100);
4804         if (csum != CRC32_RESIDUAL) {
4805                 rc = -ENODEV;
4806         }
4807
4808 test_nvram_done:
4809         return rc;
4810 }
4811
4812 static int
4813 bnx2_test_link(struct bnx2 *bp)
4814 {
4815         u32 bmsr;
4816
4817         spin_lock_bh(&bp->phy_lock);
4818         bnx2_enable_bmsr1(bp);
4819         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4820         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4821         bnx2_disable_bmsr1(bp);
4822         spin_unlock_bh(&bp->phy_lock);
4823
4824         if (bmsr & BMSR_LSTATUS) {
4825                 return 0;
4826         }
4827         return -ENODEV;
4828 }
4829
4830 static int
4831 bnx2_test_intr(struct bnx2 *bp)
4832 {
4833         int i;
4834         u16 status_idx;
4835
4836         if (!netif_running(bp->dev))
4837                 return -ENODEV;
4838
4839         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4840
4841         /* This register is not touched during run-time. */
4842         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4843         REG_RD(bp, BNX2_HC_COMMAND);
4844
4845         for (i = 0; i < 10; i++) {
4846                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4847                         status_idx) {
4848
4849                         break;
4850                 }
4851
4852                 msleep_interruptible(10);
4853         }
4854         if (i < 10)
4855                 return 0;
4856
4857         return -ENODEV;
4858 }
4859
/* Periodic link maintenance for the 5706 SerDes PHY.
 *
 * Implements parallel detection: while autoneg has the link down but a
 * signal is present with no config exchange from the partner, fall back
 * to forced 1000/full.  Once a partner config appears while in that
 * forced mode, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* An autoneg attempt is still in flight; give it time. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Shadow register 0x1c: signal detect status.
                         * NOTE(review): register semantics inferred
                         * from the inline comments below - confirm
                         * against the PHY datasheet.
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Expansion register 0x15 via 0x17: read twice
                         * so the second read is current.
                         */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is not autonegotiating:
                                 * force 1000/full instead.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link came up via parallel detect; if the partner now
                 * sends CONFIG, switch back to autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4914
/* Periodic link maintenance for the 5708 SerDes PHY.
 *
 * While autoneg has the link down, alternate between forced 2.5G and
 * autonegotiation so a non-negotiating 2.5G partner can still be
 * linked.  No-op when a remote PHY manages the link, or when the PHY
 * is not 2.5G capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Still waiting out a previous autoneg attempt. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg failed so far: try forced 2.5G with
                         * a shorter re-check interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G failed too: go back to autoneg
                         * and give it two timer ticks to complete.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4947
/* Driver heartbeat timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, kicks a stats update on the 5708, and runs
 * the per-chip SerDes link maintenance.  Re-arms itself with the
 * current interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are disabled (e.g. during reset): skip all work
         * but keep the timer running.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4978
4979 static int
4980 bnx2_request_irq(struct bnx2 *bp)
4981 {
4982         struct net_device *dev = bp->dev;
4983         int rc = 0;
4984
4985         if (bp->flags & USING_MSI_FLAG) {
4986                 irq_handler_t   fn = bnx2_msi;
4987
4988                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4989                         fn = bnx2_msi_1shot;
4990
4991                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4992         } else
4993                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4994                                  IRQF_SHARED, dev->name, dev);
4995         return rc;
4996 }
4997
4998 static void
4999 bnx2_free_irq(struct bnx2 *bp)
5000 {
5001         struct net_device *dev = bp->dev;
5002
5003         if (bp->flags & USING_MSI_FLAG) {
5004                 free_irq(bp->pdev->irq, dev);
5005                 pci_disable_msi(bp->pdev);
5006                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5007         } else
5008                 free_irq(bp->pdev->irq, dev);
5009 }
5010
/* Called with rtnl_lock */
/* net_device open: power up, allocate rings, attach the IRQ (MSI when
 * available), initialize the NIC, then verify MSI actually delivers an
 * interrupt - falling back to INTx if it does not.
 *
 * Returns 0 on success or a negative errno; all resources acquired so
 * far are released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* Prefer MSI when the device supports it and the module
         * parameter has not disabled it; the 5709 additionally
         * supports one-shot MSI.
         */
        if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bp->flags |= ONE_SHOT_MSI_FLAG;
                }
        }
        rc = bnx2_request_irq(bp);

        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        /* bnx2_free_irq() also disables MSI and clears
                         * the MSI flags, so the retry below requests a
                         * legacy INTx line.
                         */
                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        rc = bnx2_init_nic(bp);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        /* Re-check the flag: it was cleared above if we reverted to
         * INTx, so this only prints when MSI is actually in use.
         */
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
5092
/* Work-queue handler behind bnx2_tx_timeout(): quiesce the interface,
 * re-initialize the chip, then restart I/O.  in_reset_task lets
 * bnx2_close() busy-wait for this handler instead of flushing the
 * work queue, which could deadlock on rtnl_lock (see comment there).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Raise intr_sem so interrupt handling stays blocked until the
	 * restart path re-arms it -- presumably bnx2_netif_start() drops
	 * it again; confirm against that helper.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5110
/* Netdev watchdog hook: defer the actual chip reset to process context
 * via the reset_task work item (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5119
#ifdef BCM_VLAN
/* Called with rtnl_lock.  Record the VLAN group (may be NULL to detach,
 * per VLAN core convention) and reprogram the RX mode while the
 * interface is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5135
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: builds one TX buffer descriptor per skb fragment
 * (head + nr_frags), sets up checksum-offload / VLAN / LSO flags, then
 * rings the doorbell registers.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring unexpectedly lacks room.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue is stopped before it gets this full (see the check at
	 * the bottom), so hitting this indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* LSO/TSO: encode MSS, TCP option length and (for IPv6) the TCP
	 * header offset into the descriptor flag words.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Non-zero extension-header offset is split
				 * across several descriptor bit fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO: headers are rewritten below, so a cloned
			 * header area must be privatized first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime IP total length and TCP pseudo-header
			 * checksum for the hardware segmentation engine.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* DMA-map the linear head and fill the first descriptor. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per paged fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; the re-check guards against
	 * racing with bnx2_tx_int() freeing descriptors in between.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5274
/* Called with rtnl_lock */
/* ndo stop handler: wait out any in-flight reset task, quiesce and
 * reset the chip with a WoL-appropriate reset code, release IRQ and
 * memory, then drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code that matches the Wake-on-LAN
	 * configuration so the chip is left in the right power/link state.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5306
/* The hardware keeps 64-bit counters as _hi/_lo 32-bit halves.  On
 * 64-bit hosts combine both halves into an unsigned long; on 32-bit
 * hosts only the low half fits, so the high half is dropped.  The
 * expansions are fully parenthesized so the macros compose safely
 * inside larger expressions (the old GET_NET_STATS64 expansion ended
 * in an unparenthesized `+`, a precedence hazard).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5319
/* ndo get_stats handler: translate the chip's DMA'd statistics block
 * into struct net_device_stats.  Returns the (zeroed) cached stats if
 * the statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* Aggregate error count is derived from the categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report usable carrier-sense errors. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5395
5396 /* All ethtool functions called with rtnl_lock */
5397
/* ethtool get_settings: report supported modes based on the PHY type
 * (serdes/fibre vs copper; both when a remote PHY is in control), plus
 * current autoneg, speed and duplex.  Speed/duplex are only meaningful
 * with carrier up and are reported as -1 otherwise.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		/* Remote PHY may switch media, so advertise both. */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link fields against the phy poll timer. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5456
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination against the PHY capabilities, then commit it and
 * reprogram the PHY.  Returns -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated once validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the medium
			 * supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5539
5540 static void
5541 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5542 {
5543         struct bnx2 *bp = netdev_priv(dev);
5544
5545         strcpy(info->driver, DRV_MODULE_NAME);
5546         strcpy(info->version, DRV_MODULE_VERSION);
5547         strcpy(info->bus_info, pci_name(bp->pdev));
5548         strcpy(info->fw_version, bp->fw_version);
5549 }
5550
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: fixed-size dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5558
5559 static void
5560 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5561 {
5562         u32 *p = _p, i, offset;
5563         u8 *orig_p = _p;
5564         struct bnx2 *bp = netdev_priv(dev);
5565         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5566                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5567                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5568                                  0x1040, 0x1048, 0x1080, 0x10a4,
5569                                  0x1400, 0x1490, 0x1498, 0x14f0,
5570                                  0x1500, 0x155c, 0x1580, 0x15dc,
5571                                  0x1600, 0x1658, 0x1680, 0x16d8,
5572                                  0x1800, 0x1820, 0x1840, 0x1854,
5573                                  0x1880, 0x1894, 0x1900, 0x1984,
5574                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5575                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5576                                  0x2000, 0x2030, 0x23c0, 0x2400,
5577                                  0x2800, 0x2820, 0x2830, 0x2850,
5578                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5579                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5580                                  0x4080, 0x4090, 0x43c0, 0x4458,
5581                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5582                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5583                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5584                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5585                                  0x6800, 0x6848, 0x684c, 0x6860,
5586                                  0x6888, 0x6910, 0x8000 };
5587
5588         regs->version = 0;
5589
5590         memset(p, 0, BNX2_REGDUMP_LEN);
5591
5592         if (!netif_running(bp->dev))
5593                 return;
5594
5595         i = 0;
5596         offset = reg_boundaries[0];
5597         p += offset;
5598         while (offset < BNX2_REGDUMP_LEN) {
5599                 *p++ = REG_RD(bp, offset);
5600                 offset += 4;
5601                 if (offset == reg_boundaries[i + 1]) {
5602                         offset = reg_boundaries[i + 2];
5603                         p = (u32 *) (orig_p + offset);
5604                         i += 2;
5605                 }
5606         }
5607 }
5608
5609 static void
5610 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5611 {
5612         struct bnx2 *bp = netdev_priv(dev);
5613
5614         if (bp->flags & NO_WOL_FLAG) {
5615                 wol->supported = 0;
5616                 wol->wolopts = 0;
5617         }
5618         else {
5619                 wol->supported = WAKE_MAGIC;
5620                 if (bp->wol)
5621                         wol->wolopts = WAKE_MAGIC;
5622                 else
5623                         wol->wolopts = 0;
5624         }
5625         memset(&wol->sopass, 0, sizeof(wol->sopass));
5626 }
5627
5628 static int
5629 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5630 {
5631         struct bnx2 *bp = netdev_priv(dev);
5632
5633         if (wol->wolopts & ~WAKE_MAGIC)
5634                 return -EINVAL;
5635
5636         if (wol->wolopts & WAKE_MAGIC) {
5637                 if (bp->flags & NO_WOL_FLAG)
5638                         return -EINVAL;
5639
5640                 bp->wol = 1;
5641         }
5642         else {
5643                 bp->wol = 0;
5644         }
5645         return 0;
5646 }
5647
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  With a remote PHY the request is forwarded to
 * the remote phy setup path; on local serdes the link is first forced
 * down (loopback) so the peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; msleep cannot be called
		 * while holding a BH-disabling spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5690
5691 static int
5692 bnx2_get_eeprom_len(struct net_device *dev)
5693 {
5694         struct bnx2 *bp = netdev_priv(dev);
5695
5696         if (bp->flash_info == NULL)
5697                 return 0;
5698
5699         return (int) bp->flash_size;
5700 }
5701
5702 static int
5703 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5704                 u8 *eebuf)
5705 {
5706         struct bnx2 *bp = netdev_priv(dev);
5707         int rc;
5708
5709         /* parameters already validated in ethtool_get_eeprom */
5710
5711         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5712
5713         return rc;
5714 }
5715
5716 static int
5717 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5718                 u8 *eebuf)
5719 {
5720         struct bnx2 *bp = netdev_priv(dev);
5721         int rc;
5722
5723         /* parameters already validated in ethtool_set_eeprom */
5724
5725         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5726
5727         return rc;
5728 }
5729
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick = microsecond timers, quick_cons_trip = frame
 * count thresholds, plus the statistics DMA interval).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5751
/* ethtool set_coalesce: store the requested values, silently clamping
 * each to the hardware field widths (usec timers to 10 bits, frame
 * counts to 8 bits, stats interval to a multiple of 256 usec), then
 * re-init the NIC so the new values take effect.  Note each value is
 * first truncated to u16 by the cast, then clamped.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 only supports stats updates off or once per second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* A running interface must be restarted for the chip to pick up
	 * the new coalescing values.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5799
5800 static void
5801 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5802 {
5803         struct bnx2 *bp = netdev_priv(dev);
5804
5805         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5806         ering->rx_mini_max_pending = 0;
5807         ering->rx_jumbo_max_pending = 0;
5808
5809         ering->rx_pending = bp->rx_ring_size;
5810         ering->rx_mini_pending = 0;
5811         ering->rx_jumbo_pending = 0;
5812
5813         ering->tx_max_pending = MAX_TX_DESC_CNT;
5814         ering->tx_pending = bp->tx_ring_size;
5815 }
5816
/* ethtool set_ringparam: validate the requested ring sizes, then (if
 * the interface is running) tear down and rebuild the rings with the
 * new sizes.  TX must exceed MAX_SKB_FRAGS so one max-fragmented skb
 * always fits.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the interface is left
		 * stopped with no rings; the user must close/reopen to
		 * recover -- confirm this is the intended failure mode.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5850
5851 static void
5852 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5853 {
5854         struct bnx2 *bp = netdev_priv(dev);
5855
5856         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5857         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5858         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5859 }
5860
5861 static int
5862 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5863 {
5864         struct bnx2 *bp = netdev_priv(dev);
5865
5866         bp->req_flow_ctrl = 0;
5867         if (epause->rx_pause)
5868                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5869         if (epause->tx_pause)
5870                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5871
5872         if (epause->autoneg) {
5873                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5874         }
5875         else {
5876                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5877         }
5878
5879         spin_lock_bh(&bp->phy_lock);
5880
5881         bnx2_setup_phy(bp, bp->phy_port);
5882
5883         spin_unlock_bh(&bp->phy_lock);
5884
5885         return 0;
5886 }
5887
5888 static u32
5889 bnx2_get_rx_csum(struct net_device *dev)
5890 {
5891         struct bnx2 *bp = netdev_priv(dev);
5892
5893         return bp->rx_csum;
5894 }
5895
5896 static int
5897 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5898 {
5899         struct bnx2 *bp = netdev_priv(dev);
5900
5901         bp->rx_csum = data;
5902         return 0;
5903 }
5904
5905 static int
5906 bnx2_set_tso(struct net_device *dev, u32 data)
5907 {
5908         struct bnx2 *bp = netdev_priv(dev);
5909
5910         if (data) {
5911                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5912                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5913                         dev->features |= NETIF_F_TSO6;
5914         } else
5915                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5916                                    NETIF_F_TSO_ECN);
5917         return 0;
5918 }
5919
/* Number of counters exposed through "ethtool -S".  The name table
 * below and the companion offset table(s) that follow are indexed in
 * lock step, so entries must be kept in the same order.
 */
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5972
/* Convert a statistics_block field offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets into the hardware statistics block, one per entry of
 * bnx2_stats_str_arr (same order).  Field names follow the hardware
 * header, including its spellings (e.g. "Overrsize").
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6023
/* Per-statistic counter widths in bytes (8 = 64-bit hi/lo pair, 4 =
 * 32-bit, 0 = counter skipped), indexed like bnx2_stats_str_arr.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

/* Same table for 5708-class chips; only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6042
#define BNX2_NUM_TESTS 6

/* ethtool self-test names for ETH_SS_TEST; the order matches the
 * result slots filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6055
/* ethtool hook: number of self-test result slots we report. */
static int
bnx2_self_test_count(struct net_device *dev)
{
        return BNX2_NUM_TESTS;
}
6061
6062 static void
6063 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6064 {
6065         struct bnx2 *bp = netdev_priv(dev);
6066
6067         memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6068         if (etest->flags & ETH_TEST_FL_OFFLINE) {
6069                 int i;
6070
6071                 bnx2_netif_stop(bp);
6072                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6073                 bnx2_free_skbs(bp);
6074
6075                 if (bnx2_test_registers(bp) != 0) {
6076                         buf[0] = 1;
6077                         etest->flags |= ETH_TEST_FL_FAILED;
6078                 }
6079                 if (bnx2_test_memory(bp) != 0) {
6080                         buf[1] = 1;
6081                         etest->flags |= ETH_TEST_FL_FAILED;
6082                 }
6083                 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6084                         etest->flags |= ETH_TEST_FL_FAILED;
6085
6086                 if (!netif_running(bp->dev)) {
6087                         bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6088                 }
6089                 else {
6090                         bnx2_init_nic(bp);
6091                         bnx2_netif_start(bp);
6092                 }
6093
6094                 /* wait for link up */
6095                 for (i = 0; i < 7; i++) {
6096                         if (bp->link_up)
6097                                 break;
6098                         msleep_interruptible(1000);
6099                 }
6100         }
6101
6102         if (bnx2_test_nvram(bp) != 0) {
6103                 buf[3] = 1;
6104                 etest->flags |= ETH_TEST_FL_FAILED;
6105         }
6106         if (bnx2_test_intr(bp) != 0) {
6107                 buf[4] = 1;
6108                 etest->flags |= ETH_TEST_FL_FAILED;
6109         }
6110
6111         if (bnx2_test_link(bp) != 0) {
6112                 buf[5] = 1;
6113                 etest->flags |= ETH_TEST_FL_FAILED;
6114
6115         }
6116 }
6117
6118 static void
6119 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6120 {
6121         switch (stringset) {
6122         case ETH_SS_STATS:
6123                 memcpy(buf, bnx2_stats_str_arr,
6124                         sizeof(bnx2_stats_str_arr));
6125                 break;
6126         case ETH_SS_TEST:
6127                 memcpy(buf, bnx2_tests_str_arr,
6128                         sizeof(bnx2_tests_str_arr));
6129                 break;
6130         }
6131 }
6132
/* ethtool hook: number of statistics reported by get_ethtool_stats. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
        return BNX2_NUM_STATS;
}
6138
6139 static void
6140 bnx2_get_ethtool_stats(struct net_device *dev,
6141                 struct ethtool_stats *stats, u64 *buf)
6142 {
6143         struct bnx2 *bp = netdev_priv(dev);
6144         int i;
6145         u32 *hw_stats = (u32 *) bp->stats_blk;
6146         u8 *stats_len_arr = NULL;
6147
6148         if (hw_stats == NULL) {
6149                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6150                 return;
6151         }
6152
6153         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6154             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6155             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6156             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6157                 stats_len_arr = bnx2_5706_stats_len_arr;
6158         else
6159                 stats_len_arr = bnx2_5708_stats_len_arr;
6160
6161         for (i = 0; i < BNX2_NUM_STATS; i++) {
6162                 if (stats_len_arr[i] == 0) {
6163                       &n