[BNX2]: Add ring constants.
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x8000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.6.9"
60 #define DRV_MODULE_RELDATE      "December 8, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81         BCM5706 = 0,
82         NC370T,
83         NC370I,
84         BCM5706S,
85         NC370F,
86         BCM5708,
87         BCM5708S,
88         BCM5709,
89         BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static const struct {
94         char *name;
95 } board_info[] __devinitdata = {
96         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97         { "HP NC370T Multifunction Gigabit Server Adapter" },
98         { "HP NC370i Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100         { "HP NC370F Multifunction Gigabit Server Adapter" },
101         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105         };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126         { 0, }
127 };
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
133         /* Slow EEPROM */
134         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137          "EEPROM - slow"},
138         /* Expansion entry 0001 */
139         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142          "Entry 0001"},
143         /* Saifun SA25F010 (non-buffered flash) */
144         /* strap, cfg1, & write1 need updates */
145         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148          "Non-buffered flash (128kB)"},
149         /* Saifun SA25F020 (non-buffered flash) */
150         /* strap, cfg1, & write1 need updates */
151         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154          "Non-buffered flash (256kB)"},
155         /* Expansion entry 0100 */
156         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159          "Entry 0100"},
160         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170         /* Saifun SA25F005 (non-buffered flash) */
171         /* strap, cfg1, & write1 need updates */
172         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175          "Non-buffered flash (64kB)"},
176         /* Fast EEPROM */
177         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180          "EEPROM - fast"},
181         /* Expansion entry 1001 */
182         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185          "Entry 1001"},
186         /* Expansion entry 1010 */
187         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190          "Entry 1010"},
191         /* ATMEL AT45DB011B (buffered flash) */
192         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195          "Buffered flash (128kB)"},
196         /* Expansion entry 1100 */
197         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200          "Entry 1100"},
201         /* Expansion entry 1101 */
202         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1101"},
206         /* Ateml Expansion entry 1110 */
207         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1110 (Atmel)"},
211         /* ATMEL AT45DB021B (buffered flash) */
212         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215          "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219         .flags          = BNX2_NV_BUFFERED,
220         .page_bits      = BCM5709_FLASH_PAGE_BITS,
221         .page_size      = BCM5709_FLASH_PAGE_SIZE,
222         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
223         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
224         .name           = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
/* Return the number of free tx descriptors in the tx ring.
 *
 * tx_prod and tx_cons are free-running 16-bit hardware indices, so the
 * producer/consumer difference must be folded back into the ring range
 * before subtracting from tx_ring_size.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	/* Order this read of tx_prod/tx_cons against updates made on
	 * other CPUs (pairs with the barrier in the tx completion path).
	 */
	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Write @val to a device register indirectly through the PCI config
 * window.  indirect_lock serializes use of the shared window
 * address/data register pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * Returns 0 with the 16-bit register value stored in *@val on success,
 * or -EBUSY (with *@val zeroed) if the access does not complete within
 * the polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO bus; disable it for the
	 * duration of this manual access and restore it at the end.
	 */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back, presumably to flush the posted write. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read command: PHY address, register number, and
	 * START_BUSY to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the controller to clear START_BUSY,
	 * then re-read to pick up the data bits.
	 */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Re-enable hardware auto-polling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the access does not complete
 * within the polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO bus; disable it for the
	 * duration of this manual access and restore it at the end.
	 */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back, presumably to flush the posted write. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write command: PHY address, register number, data,
	 * and START_BUSY to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the controller to clear START_BUSY. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Re-enable hardware auto-polling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask the device interrupt. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back, presumably to flush the posted write so the mask
	 * takes effect before we return.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask the device interrupt and force a status-block update.
 *
 * The ack is done in two writes: first acknowledging up to
 * last_status_idx with the interrupt still masked, then again with the
 * mask bit cleared to re-enable interrupts.  The COAL_NOW command asks
 * the host coalescing block to generate an immediate update so no
 * events are missed across the re-enable.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Mask the device interrupt and wait for any in-flight interrupt
 * handler to finish.  intr_sem is bumped first so the handler path can
 * detect that interrupts are intentionally disabled; it is decremented
 * again in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442         if (atomic_dec_and_test(&bp->intr_sem)) {
443                 if (netif_running(bp->dev)) {
444                         netif_wake_queue(bp->dev);
445                         napi_enable(&bp->napi);
446                         bnx2_enable_int(bp);
447                 }
448         }
449 }
450
/* Free all DMA and host memory allocated by bnx2_alloc_mem().
 *
 * Safe to call on a partially allocated state (used as the error path
 * of bnx2_alloc_mem()): every pointer is checked or NULLed after free.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context memory pages. */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; stats_blk
	 * points into it, so only the status block is freed.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	/* rx_buf_ring is the one vmalloc'ed (not DMA) buffer. */
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
487
/* Allocate all host and DMA memory for the device: tx/rx buffer and
 * descriptor rings, the combined status/statistics block, and (5709
 * only) the context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx buffer ring can be large, so it is vmalloc'ed rather
	 * than kmalloc'ed, and therefore zeroed explicitly.
	 */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk points into the status block allocation, right
	 * after the cache-aligned status block.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	/* 5709 keeps part of its context in host memory. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
552
/* Report the current link state (speed, duplex, autoneg status) to the
 * bootcode via the shared memory LINK_STATUS word.  Skipped entirely
 * when the PHY is managed remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR bits are latched; read twice so the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
611
612 static char *
613 bnx2_xceiver_str(struct bnx2 *bp)
614 {
615         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
617                  "Copper"));
618 }
619
/* Log the current link state (speed, duplex, flow control) and update
 * the netdev carrier state, then propagate the state to the bootcode
 * via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Flow control may be rx-only, tx-only, or both; the
		 * message is assembled piecewise accordingly.
		 */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
656
/* Resolve the negotiated flow control setting into bp->flow_ctrl.
 *
 * If flow control is not being autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the local and
 * remote pause advertisements are compared per the standard pause
 * resolution rules (Table 28B-3 of 802.3ab-1999).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Not autonegotiating both speed and flow control: use the
	 * manually requested setting.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes exposes the resolved pause result directly in a
	 * status register; no need to compute it from advertisements.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes uses the 1000BASE-X pause bits; translate them to the
	 * standard ADVERTISE_PAUSE_* encoding so the resolution logic
	 * below works for both media types.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
732
733 static int
734 bnx2_5709s_linkup(struct bnx2 *bp)
735 {
736         u32 val, speed;
737
738         bp->link_up = 1;
739
740         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
743
744         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745                 bp->line_speed = bp->req_line_speed;
746                 bp->duplex = bp->req_duplex;
747                 return 0;
748         }
749         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
750         switch (speed) {
751                 case MII_BNX2_GP_TOP_AN_SPEED_10:
752                         bp->line_speed = SPEED_10;
753                         break;
754                 case MII_BNX2_GP_TOP_AN_SPEED_100:
755                         bp->line_speed = SPEED_100;
756                         break;
757                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759                         bp->line_speed = SPEED_1000;
760                         break;
761                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762                         bp->line_speed = SPEED_2500;
763                         break;
764         }
765         if (val & MII_BNX2_GP_TOP_AN_FD)
766                 bp->duplex = DUPLEX_FULL;
767         else
768                 bp->duplex = DUPLEX_HALF;
769         return 0;
770 }
771
772 static int
773 bnx2_5708s_linkup(struct bnx2 *bp)
774 {
775         u32 val;
776
777         bp->link_up = 1;
778         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780                 case BCM5708S_1000X_STAT1_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case BCM5708S_1000X_STAT1_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case BCM5708S_1000X_STAT1_SPEED_1G:
787                         bp->line_speed = SPEED_1000;
788                         break;
789                 case BCM5708S_1000X_STAT1_SPEED_2G5:
790                         bp->line_speed = SPEED_2500;
791                         break;
792         }
793         if (val & BCM5708S_1000X_STAT1_FD)
794                 bp->duplex = DUPLEX_FULL;
795         else
796                 bp->duplex = DUPLEX_HALF;
797
798         return 0;
799 }
800
801 static int
802 bnx2_5706s_linkup(struct bnx2 *bp)
803 {
804         u32 bmcr, local_adv, remote_adv, common;
805
806         bp->link_up = 1;
807         bp->line_speed = SPEED_1000;
808
809         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
810         if (bmcr & BMCR_FULLDPLX) {
811                 bp->duplex = DUPLEX_FULL;
812         }
813         else {
814                 bp->duplex = DUPLEX_HALF;
815         }
816
817         if (!(bmcr & BMCR_ANENABLE)) {
818                 return 0;
819         }
820
821         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
823
824         common = local_adv & remote_adv;
825         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
826
827                 if (common & ADVERTISE_1000XFULL) {
828                         bp->duplex = DUPLEX_FULL;
829                 }
830                 else {
831                         bp->duplex = DUPLEX_HALF;
832                 }
833         }
834
835         return 0;
836 }
837
838 static int
839 bnx2_copper_linkup(struct bnx2 *bp)
840 {
841         u32 bmcr;
842
843         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
844         if (bmcr & BMCR_ANENABLE) {
845                 u32 local_adv, remote_adv, common;
846
847                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
849
850                 common = local_adv & (remote_adv >> 2);
851                 if (common & ADVERTISE_1000FULL) {
852                         bp->line_speed = SPEED_1000;
853                         bp->duplex = DUPLEX_FULL;
854                 }
855                 else if (common & ADVERTISE_1000HALF) {
856                         bp->line_speed = SPEED_1000;
857                         bp->duplex = DUPLEX_HALF;
858                 }
859                 else {
860                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
862
863                         common = local_adv & remote_adv;
864                         if (common & ADVERTISE_100FULL) {
865                                 bp->line_speed = SPEED_100;
866                                 bp->duplex = DUPLEX_FULL;
867                         }
868                         else if (common & ADVERTISE_100HALF) {
869                                 bp->line_speed = SPEED_100;
870                                 bp->duplex = DUPLEX_HALF;
871                         }
872                         else if (common & ADVERTISE_10FULL) {
873                                 bp->line_speed = SPEED_10;
874                                 bp->duplex = DUPLEX_FULL;
875                         }
876                         else if (common & ADVERTISE_10HALF) {
877                                 bp->line_speed = SPEED_10;
878                                 bp->duplex = DUPLEX_HALF;
879                         }
880                         else {
881                                 bp->line_speed = 0;
882                                 bp->link_up = 0;
883                         }
884                 }
885         }
886         else {
887                 if (bmcr & BMCR_SPEED100) {
888                         bp->line_speed = SPEED_100;
889                 }
890                 else {
891                         bp->line_speed = SPEED_10;
892                 }
893                 if (bmcr & BMCR_FULLDPLX) {
894                         bp->duplex = DUPLEX_FULL;
895                 }
896                 else {
897                         bp->duplex = DUPLEX_HALF;
898                 }
899         }
900
901         return 0;
902 }
903
/* Program the EMAC to match the current link state: port mode for the
 * negotiated speed, duplex, and rx/tx pause enables; then acknowledge
 * the EMAC link-change interrupt.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff look like IPG/slot-time settings;
	 * half-duplex gigabit needs the larger value -- confirm against
	 * the chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M port mode; the 5706 uses
				 * plain MII mode for 10M as well.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G adds the 25G mode bit on top of GMII. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
970
971 static void
972 bnx2_enable_bmsr1(struct bnx2 *bp)
973 {
974         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975             (CHIP_NUM(bp) == CHIP_NUM_5709))
976                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977                                MII_BNX2_BLK_ADDR_GP_STATUS);
978 }
979
980 static void
981 bnx2_disable_bmsr1(struct bnx2 *bp)
982 {
983         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984             (CHIP_NUM(bp) == CHIP_NUM_5709))
985                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
987 }
988
/* Make sure 2.5G is advertised (via the up1 register) on a 2.5G-capable
 * SerDes PHY.  Returns 1 if 2.5G was already being advertised, 0 if it
 * had to be enabled just now, and 0 if the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709, the up1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1017
/* Stop advertising 2.5G (via the up1 register) on a 2.5G-capable SerDes
 * PHY.  Returns 1 if 2.5G had been advertised and was just cleared, 0
 * otherwise (including when the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	/* On the 5709, the up1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1043
1044 static void
1045 bnx2_enable_forced_2g5(struct bnx2 *bp)
1046 {
1047         u32 bmcr;
1048
1049         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1050                 return;
1051
1052         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1053                 u32 val;
1054
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1057                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1061
1062                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1065
1066         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1067                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1069         }
1070
1071         if (bp->autoneg & AUTONEG_SPEED) {
1072                 bmcr &= ~BMCR_ANENABLE;
1073                 if (bp->req_duplex == DUPLEX_FULL)
1074                         bmcr |= BMCR_FULLDPLX;
1075         }
1076         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1077 }
1078
1079 static void
1080 bnx2_disable_forced_2g5(struct bnx2 *bp)
1081 {
1082         u32 bmcr;
1083
1084         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1085                 return;
1086
1087         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1088                 u32 val;
1089
1090                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1092                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1095
1096                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1099
1100         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1101                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1103         }
1104
1105         if (bp->autoneg & AUTONEG_SPEED)
1106                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1108 }
1109
/* Poll the PHY for current link state, update bp->link_up/line_speed/
 * duplex accordingly, report any link change, and reprogram the MAC to
 * match.  Caller is expected to hold phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is always considered up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns the PHY; link changes arrive through
	 * bnx2_remote_phy_event() instead.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read it twice to obtain the
	 * current state.  The enable/disable calls select the proper
	 * register block on 5709 SerDes.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, the EMAC link status overrides the PHY's. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can recover. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1175
1176 static int
1177 bnx2_reset_phy(struct bnx2 *bp)
1178 {
1179         int i;
1180         u32 reg;
1181
1182         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1183
1184 #define PHY_RESET_MAX_WAIT 100
1185         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1186                 udelay(10);
1187
1188                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1189                 if (!(reg & BMCR_RESET)) {
1190                         udelay(20);
1191                         break;
1192                 }
1193         }
1194         if (i == PHY_RESET_MAX_WAIT) {
1195                 return -EBUSY;
1196         }
1197         return 0;
1198 }
1199
1200 static u32
1201 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1202 {
1203         u32 adv = 0;
1204
1205         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1207
1208                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209                         adv = ADVERTISE_1000XPAUSE;
1210                 }
1211                 else {
1212                         adv = ADVERTISE_PAUSE_CAP;
1213                 }
1214         }
1215         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217                         adv = ADVERTISE_1000XPSE_ASYM;
1218                 }
1219                 else {
1220                         adv = ADVERTISE_PAUSE_ASYM;
1221                 }
1222         }
1223         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1226                 }
1227                 else {
1228                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229                 }
1230         }
1231         return adv;
1232 }
1233
1234 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1235
1236 static int
1237 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1238 {
1239         u32 speed_arg = 0, pause_adv;
1240
1241         pause_adv = bnx2_phy_get_pause_adv(bp);
1242
1243         if (bp->autoneg & AUTONEG_SPEED) {
1244                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245                 if (bp->advertising & ADVERTISED_10baseT_Half)
1246                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247                 if (bp->advertising & ADVERTISED_10baseT_Full)
1248                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249                 if (bp->advertising & ADVERTISED_100baseT_Half)
1250                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251                 if (bp->advertising & ADVERTISED_100baseT_Full)
1252                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1257         } else {
1258                 if (bp->req_line_speed == SPEED_2500)
1259                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260                 else if (bp->req_line_speed == SPEED_1000)
1261                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262                 else if (bp->req_line_speed == SPEED_100) {
1263                         if (bp->req_duplex == DUPLEX_FULL)
1264                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1265                         else
1266                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267                 } else if (bp->req_line_speed == SPEED_10) {
1268                         if (bp->req_duplex == DUPLEX_FULL)
1269                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1270                         else
1271                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1272                 }
1273         }
1274
1275         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1278                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1279
1280         if (port == PORT_TP)
1281                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1283
1284         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1285
1286         spin_unlock_bh(&bp->phy_lock);
1287         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288         spin_lock_bh(&bp->phy_lock);
1289
1290         return 0;
1291 }
1292
/* Apply the requested link settings to a SerDes PHY: either force a
 * specific speed/duplex or (re)start autonegotiation with the desired
 * advertisement.  Delegates to the firmware for remote-capable PHYs.
 * Caller holds phy_lock; it is dropped briefly around the msleep().
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G advertisement requires a link bounce
		 * so the partner sees the new abilities.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is the BMCR speed-
				 * select bit (BMCR_SPEED100); clearing it
				 * appears to drop the forced-speed encoding
				 * back to 1G -- confirm against PHY docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve flow control
			 * and refresh the MAC configuration.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1407
1408 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1409         (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
1410                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1411                 (ADVERTISED_1000baseT_Full)
1412
1413 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1414         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1415         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1416         ADVERTISED_1000baseT_Full)
1417
1418 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1419         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1420
1421 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1422
1423 static void
1424 bnx2_set_default_remote_link(struct bnx2 *bp)
1425 {
1426         u32 link;
1427
1428         if (bp->phy_port == PORT_TP)
1429                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1430         else
1431                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1432
1433         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1434                 bp->req_line_speed = 0;
1435                 bp->autoneg |= AUTONEG_SPEED;
1436                 bp->advertising = ADVERTISED_Autoneg;
1437                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1438                         bp->advertising |= ADVERTISED_10baseT_Half;
1439                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1440                         bp->advertising |= ADVERTISED_10baseT_Full;
1441                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1442                         bp->advertising |= ADVERTISED_100baseT_Half;
1443                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1444                         bp->advertising |= ADVERTISED_100baseT_Full;
1445                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1446                         bp->advertising |= ADVERTISED_1000baseT_Full;
1447                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1448                         bp->advertising |= ADVERTISED_2500baseX_Full;
1449         } else {
1450                 bp->autoneg = 0;
1451                 bp->advertising = 0;
1452                 bp->req_duplex = DUPLEX_FULL;
1453                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1454                         bp->req_line_speed = SPEED_10;
1455                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1456                                 bp->req_duplex = DUPLEX_HALF;
1457                 }
1458                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1459                         bp->req_line_speed = SPEED_100;
1460                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1461                                 bp->req_duplex = DUPLEX_HALF;
1462                 }
1463                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1464                         bp->req_line_speed = SPEED_1000;
1465                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1466                         bp->req_line_speed = SPEED_2500;
1467         }
1468 }
1469
1470 static void
1471 bnx2_set_default_link(struct bnx2 *bp)
1472 {
1473         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1474                 return bnx2_set_default_remote_link(bp);
1475
1476         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1477         bp->req_line_speed = 0;
1478         if (bp->phy_flags & PHY_SERDES_FLAG) {
1479                 u32 reg;
1480
1481                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1482
1483                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1484                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1485                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1486                         bp->autoneg = 0;
1487                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1488                         bp->req_duplex = DUPLEX_FULL;
1489                 }
1490         } else
1491                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1492 }
1493
/* Bump the driver pulse sequence number and write it to the shared-
 * memory pulse mailbox so the bootcode knows the driver is alive.
 * Writes through the PCI register window directly, serialized by
 * indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1507
/* Handle a firmware LINK_EVENT for a remote-managed PHY: parse the
 * LINK_STATUS shared-memory word into link/speed/duplex/flow-control
 * state, switch link defaults if the port type changed, then report
 * the change and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Answer the firmware heartbeat if it has expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets duplex and then falls into the
		 * matching FULL case to set the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Use the requested (forced) flow control unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take what the firmware negotiated.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Port type changed: reload defaults for the new port. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1586
1587 static int
1588 bnx2_set_remote_link(struct bnx2 *bp)
1589 {
1590         u32 evt_code;
1591
1592         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1593         switch (evt_code) {
1594                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595                         bnx2_remote_phy_event(bp);
1596                         break;
1597                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1598                 default:
1599                         bnx2_send_heart_beat(bp);
1600                         break;
1601         }
1602         return 0;
1603 }
1604
/* Program a copper PHY according to the requested link settings in *bp.
 *
 * With AUTONEG_SPEED set, the 10/100 and 1000 advertisement registers
 * are rewritten and autonegotiation restarted only if something actually
 * changed; otherwise only flow control is re-resolved.  Without autoneg,
 * speed/duplex are forced through BMCR.
 *
 * Called with bp->phy_lock held; the lock is temporarily dropped around
 * the msleep() used to force the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked down to the speed and
		 * pause bits we manage.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite and restart autoneg only if the advertisement
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path (autoneg disabled). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		/* BMCR already matches the request; just refresh flow
		 * control and the MAC link settings.
		 */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1701
1702 static int
1703 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1704 {
1705         if (bp->loopback == MAC_LOOPBACK)
1706                 return 0;
1707
1708         if (bp->phy_flags & PHY_SERDES_FLAG) {
1709                 return (bnx2_setup_serdes_phy(bp, port));
1710         }
1711         else {
1712                 return (bnx2_setup_copper_phy(bp));
1713         }
1714 }
1715
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S uses a paged register layout: MII_BNX2_BLK_ADDR selects a
 * register block before the registers in that block are accessed.  The
 * standard MII registers live at an offset of 0x10, so the bp->mii_*
 * shortcuts are redirected first.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode instead of auto media detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G advertisement based on capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable CL73 BAM autoneg features. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1764
/* Initialize the 5708 SerDes PHY.
 *
 * Resets the PHY, selects fiber mode with auto-detect, enables PLL
 * early link detect, and applies chip-revision and board-specific TX
 * signal adjustments read from shared memory.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register mapping in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G if the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM config,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1822
/* Initialize the 5706 SerDes PHY.
 *
 * Resets the PHY and programs vendor shadow registers (0x18/0x1c)
 * differently depending on whether jumbo frames (mtu > 1500) are in
 * use.  The raw register values are per Broadcom; their individual bit
 * meanings are not documented here — TODO confirm against the 5706
 * datasheet before changing them.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1859
/* Initialize a copper PHY.
 *
 * Resets the PHY, applies workarounds selected by phy_flags (a CRC fix
 * via a DSP write sequence, and disabling early DAC), adjusts extended
 * packet length handling for jumbo MTU, and enables ethernet@wirespeed.
 * The raw 0x15/0x17/0x18 shadow-register values are per Broadcom and
 * not individually documented here.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* DSP write sequence implementing the CRC workaround. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 of DSP expansion register 8 to disable
		 * early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1910
1911
/* Top-level PHY initialization.
 *
 * Sets the link-ready interrupt mode, resets the bp->mii_* register
 * shortcuts to the standard MII layout (the 5709S init overrides them),
 * enables link attentions, then dispatches to the chip-specific SerDes
 * or copper init routine and finally runs bnx2_setup_phy().
 *
 * When the PHY is managed remotely by firmware (REMOTE_PHY_CAP_FLAG),
 * local PHY ID probing and init are skipped entirely.
 *
 * Returns 0 on success or the error from the init/setup routine.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default MII register map; chip-specific init may override. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Read the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1955
1956 static int
1957 bnx2_set_mac_loopback(struct bnx2 *bp)
1958 {
1959         u32 mac_mode;
1960
1961         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965         bp->link_up = 1;
1966         return 0;
1967 }
1968
1969 static int bnx2_test_link(struct bnx2 *);
1970
1971 static int
1972 bnx2_set_phy_loopback(struct bnx2 *bp)
1973 {
1974         u32 mac_mode;
1975         int rc, i;
1976
1977         spin_lock_bh(&bp->phy_lock);
1978         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1979                             BMCR_SPEED1000);
1980         spin_unlock_bh(&bp->phy_lock);
1981         if (rc)
1982                 return rc;
1983
1984         for (i = 0; i < 10; i++) {
1985                 if (bnx2_test_link(bp) == 0)
1986                         break;
1987                 msleep(100);
1988         }
1989
1990         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1992                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1993                       BNX2_EMAC_MODE_25G_MODE);
1994
1995         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1996         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1997         bp->link_up = 1;
1998         return 0;
1999 }
2000
/* Send a message to the bootcode through the driver mailbox and wait
 * for an acknowledgement.
 *
 * A monotonically increasing sequence number is OR'd into msg_data so
 * the firmware ack (which echoes the sequence) can be matched.  WAIT0
 * messages return immediately after the poll loop regardless of ack.
 *
 * @silent: suppress the timeout printk when nonzero.
 *
 * Returns 0 on success, -EBUSY on ack timeout (after informing the
 * firmware), or -EIO if the firmware reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require a firmware ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2043
/* Initialize the 5709 context memory.
 *
 * Kicks off the hardware context memory init, waits for it to finish,
 * then programs the host page table with the DMA address of each
 * context block, polling for each write request to complete.
 *
 * Returns 0 on success or -EBUSY if the hardware does not complete the
 * memory init or a page table write in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and start memory init; encode the
	 * page size (relative to 256 bytes) in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the hardware is done. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write one host page table entry per context block. */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the entry is committed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2086
/* Zero the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).
 *
 * On 5706 A0 silicon, some physical context IDs are remapped (CIDs
 * with bit 3 set map into the 0x60+ range) to work around bad context
 * memory blocks; on other chips virtual and physical addresses are
 * identical.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap CIDs with bit 3 set to avoid bad blocks. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one through the zero virtual address window.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			/* Restore the real virtual->physical mapping. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2132
/* Work around bad RX buffer memory blocks by draining the hardware
 * mbuf pool and returning only the good buffers.
 *
 * Every mbuf is allocated from the chip; buffers whose address has
 * bit 9 set are bad and are simply never freed back, so the firmware
 * can no longer hand them out.  Returns 0 on success or -ENOMEM if the
 * temporary array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough to hold every mbuf in the pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command: duplicate the mbuf value into
		 * the high field and set the valid bit.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2183
2184 static void
2185 bnx2_set_mac_addr(struct bnx2 *bp)
2186 {
2187         u32 val;
2188         u8 *mac_addr = bp->dev->dev_addr;
2189
2190         val = (mac_addr[0] << 8) | mac_addr[1];
2191
2192         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2193
2194         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2195                 (mac_addr[4] << 8) | mac_addr[5];
2196
2197         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2198 }
2199
/* Allocate and DMA-map a new RX skb for ring slot @index.
 *
 * The skb data pointer is aligned to BNX2_RX_ALIGN, mapped for
 * device->host DMA, and its bus address written into the matching RX
 * buffer descriptor.  bp->rx_prod_bseq is advanced by the buffer size.
 *
 * Returns 0 on success or -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the descriptor fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2230
2231 static int
2232 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2233 {
2234         struct status_block *sblk = bp->status_blk;
2235         u32 new_link_state, old_link_state;
2236         int is_set = 1;
2237
2238         new_link_state = sblk->status_attn_bits & event;
2239         old_link_state = sblk->status_attn_bits_ack & event;
2240         if (new_link_state != old_link_state) {
2241                 if (new_link_state)
2242                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2243                 else
2244                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2245         } else
2246                 is_set = 0;
2247
2248         return is_set;
2249 }
2250
2251 static void
2252 bnx2_phy_int(struct bnx2 *bp)
2253 {
2254         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2255                 spin_lock(&bp->phy_lock);
2256                 bnx2_set_link(bp);
2257                 spin_unlock(&bp->phy_lock);
2258         }
2259         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2260                 bnx2_set_remote_link(bp);
2261
2262 }
2263
/* Reclaim completed TX buffer descriptors.
 *
 * Walks the TX ring from the software consumer index up to the
 * hardware consumer index reported in the status block, unmapping DMA
 * and freeing each completed skb.  The hardware index is re-read after
 * each packet so newly completed work in the same pass is picked up.
 * Wakes the TX queue if it was stopped and enough descriptors are now
 * free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last ring entry holds the next-page pointer, not a BD;
	 * skip over it when the index lands there.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the next-page entry if the packet's
			 * BDs wrap past the end of a ring page.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD has not completed
			 * yet (signed compare handles index wraparound).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each paged fragment from its own ring slot. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Refresh hw_cons to pick up any new completions. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with xmit. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2351
/* Recycle an RX skb from consumer slot @cons to producer slot @prod
 * without unmapping it (used when the packet was copied or dropped).
 *
 * The DMA mapping and buffer descriptor address are moved from the
 * consumer entry to the producer entry, and rx_prod_bseq is advanced.
 * When cons == prod the entry is already in place and only the skb
 * pointer needs restoring.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the header area back to the device after the CPU may
	 * have read from it.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the bus address into the producer's descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2381
2382 static inline u16
2383 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2384 {
2385         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2386
2387         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2388                 cons++;
2389         return cons;
2390 }
2391
2392 static int
2393 bnx2_rx_int(struct bnx2 *bp, int budget)
2394 {
2395         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2396         struct l2_fhdr *rx_hdr;
2397         int rx_pkt = 0;
2398
2399         hw_cons = bnx2_get_hw_rx_cons(bp);
2400         sw_cons = bp->rx_cons;
2401         sw_prod = bp->rx_prod;
2402
2403         /* Memory barrier necessary as speculative reads of the rx
2404          * buffer can be ahead of the index in the status block
2405          */
2406         rmb();
2407         while (sw_cons != hw_cons) {
2408                 unsigned int len;
2409                 u32 status;
2410                 struct sw_bd *rx_buf;
2411                 struct sk_buff *skb;
2412                 dma_addr_t dma_addr;
2413
2414                 sw_ring_cons = RX_RING_IDX(sw_cons);
2415                 sw_ring_prod = RX_RING_IDX(sw_prod);
2416
2417                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2418                 skb = rx_buf->skb;
2419
2420                 rx_buf->skb = NULL;
2421
2422                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2423
2424                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2425                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2426
2427                 rx_hdr = (struct l2_fhdr *) skb->data;
2428                 len = rx_hdr->l2_fhdr_pkt_len - 4;
2429
2430                 if ((status = rx_hdr->l2_fhdr_status) &
2431                         (L2_FHDR_ERRORS_BAD_CRC |
2432                         L2_FHDR_ERRORS_PHY_DECODE |
2433                         L2_FHDR_ERRORS_ALIGNMENT |
2434                         L2_FHDR_ERRORS_TOO_SHORT |
2435                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2436
2437                         goto reuse_rx;
2438                 }
2439
2440                 /* Since we don't have a jumbo ring, copy small packets
2441                  * if mtu > 1500
2442                  */
2443                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2444                         struct sk_buff *new_skb;
2445
2446                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2447                         if (new_skb == NULL)
2448                                 goto reuse_rx;
2449
2450                         /* aligned copy */
2451                         skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2452                                       new_skb->data, len + 2);
2453                         skb_reserve(new_skb, 2);
2454                         skb_put(new_skb, len);
2455
2456                         bnx2_reuse_rx_skb(bp, skb,
2457                                 sw_ring_cons, sw_ring_prod);
2458
2459                         skb = new_skb;
2460                 }
2461                 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2462                         pci_unmap_single(bp->pdev, dma_addr,
2463                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2464
2465                         skb_reserve(skb, bp->rx_offset);
2466                         skb_put(skb, len);
2467                 }
2468                 else {
2469 reuse_rx:
2470                         bnx2_reuse_rx_skb(bp, skb,
2471                                 sw_ring_cons, sw_ring_prod);
2472                         goto next_rx;
2473                 }
2474
2475                 skb->protocol = eth_type_trans(skb, bp->dev);
2476
2477                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2478                         (ntohs(skb->protocol) != 0x8100)) {
2479
2480                         dev_kfree_skb(skb);
2481                         goto next_rx;
2482
2483                 }
2484
2485                 skb->ip_summed = CHECKSUM_NONE;
2486                 if (bp->rx_csum &&
2487                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2488                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2489
2490                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2491                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2492                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2493                 }
2494
2495 #ifdef BCM_VLAN
2496                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2497                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2498                                 rx_hdr->l2_fhdr_vlan_tag);
2499                 }
2500                 else
2501 #endif
2502                         netif_receive_skb(skb);
2503
2504                 bp->dev->last_rx = jiffies;
2505                 rx_pkt++;
2506
2507 next_rx:
2508                 sw_cons = NEXT_RX_BD(sw_cons);
2509                 sw_prod = NEXT_RX_BD(sw_prod);
2510
2511                 if ((rx_pkt == budget))
2512                         break;
2513
2514                 /* Refresh hw_cons to see if there is new work */
2515                 if (sw_cons == hw_cons) {
2516                         hw_cons = bnx2_get_hw_rx_cons(bp);
2517                         rmb();
2518                 }
2519         }
2520         bp->rx_cons = sw_cons;
2521         bp->rx_prod = sw_prod;
2522
2523         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2524
2525         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2526
2527         mmiowb();
2528
2529         return rx_pkt;
2530
2531 }
2532
2533 /* MSI ISR - The only difference between this and the INTx ISR
2534  * is that the MSI interrupt is always serviced.
2535  */
2536 static irqreturn_t
2537 bnx2_msi(int irq, void *dev_instance)
2538 {
2539         struct net_device *dev = dev_instance;
2540         struct bnx2 *bp = netdev_priv(dev);
2541
2542         prefetch(bp->status_blk);
2543         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2544                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2545                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2546
2547         /* Return here if interrupt is disabled. */
2548         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2549                 return IRQ_HANDLED;
2550
2551         netif_rx_schedule(dev, &bp->napi);
2552
2553         return IRQ_HANDLED;
2554 }
2555
2556 static irqreturn_t
2557 bnx2_msi_1shot(int irq, void *dev_instance)
2558 {
2559         struct net_device *dev = dev_instance;
2560         struct bnx2 *bp = netdev_priv(dev);
2561
2562         prefetch(bp->status_blk);
2563
2564         /* Return here if interrupt is disabled. */
2565         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2566                 return IRQ_HANDLED;
2567
2568         netif_rx_schedule(dev, &bp->napi);
2569
2570         return IRQ_HANDLED;
2571 }
2572
/* INTx (possibly shared) interrupt handler.  Determines whether the
 * interrupt belongs to this device, acks it, and schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further interrupts until NAPI
	 * polling completes.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Only record last_status_idx when we actually win the right
	 * to schedule the poll; otherwise a poll is already pending.
	 */
	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2611
2612 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2613                                  STATUS_ATTN_BITS_TIMER_ABORT)
2614
2615 static inline int
2616 bnx2_has_work(struct bnx2 *bp)
2617 {
2618         struct status_block *sblk = bp->status_blk;
2619
2620         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2621             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2622                 return 1;
2623
2624         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2625             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2626                 return 1;
2627
2628         return 0;
2629 }
2630
/* Perform one round of NAPI work: service pending attention (link)
 * events, reap completed tx buffers, then receive packets up to the
 * remaining budget.  Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when the raw and acked bits
	 * disagree for any event we care about.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush */
	}

	if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	/* rx work is bounded by what is left of the NAPI budget. */
	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, budget - work_done);

	return work_done;
}
2658
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted (stay scheduled) or no work remains, in which case the
 * poll is completed and device interrupts are re-enabled.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI: a single write reports the processed index
			 * and re-enables interrupts.
			 */
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* INTx: first update the index with interrupts
			 * still masked, then write again without MASK_INT
			 * to re-enable them.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2699
/* Program the EMAC receive mode (promiscuous / multicast / VLAN tag
 * handling) to match the net_device flags and multicast list.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; they are re-added below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no VLAN group is registered and
	 * ASF firmware is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* The low 8 bits of the little-endian CRC select one of
		 * 256 filter bits: bits 7:5 pick the register, bits 4:0
		 * the bit within it.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the RPM sort-user0 rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2774
2775 static void
2776 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2777         u32 rv2p_proc)
2778 {
2779         int i;
2780         u32 val;
2781
2782
2783         for (i = 0; i < rv2p_code_len; i += 8) {
2784                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2785                 rv2p_code++;
2786                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2787                 rv2p_code++;
2788
2789                 if (rv2p_proc == RV2P_PROC1) {
2790                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2791                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2792                 }
2793                 else {
2794                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2795                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2796                 }
2797         }
2798
2799         /* Reset the processor, un-stall is done later. */
2800         if (rv2p_proc == RV2P_PROC1) {
2801                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2802         }
2803         else {
2804                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2805         }
2806 }
2807
/* Halt one on-chip CPU, copy a firmware image section by section
 * into its scratchpad memory, and restart it at the image entry
 * point.
 *
 * @cpu_reg: register layout of the target CPU.
 * @fw:      firmware image; if gz_text is set it is decompressed
 *           into fw->text before loading.
 *
 * Returns 0 on success or a negative errno from zlib_inflate_blob().
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  Scratchpad offsets are derived from the
	 * MIPS virtual addresses in the image.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* Text words are written little-endian. */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
2889
/* Load and start the firmware for all on-chip processors: the two
 * RV2P engines, then RXP, TXP, TPAT, COM and (on 5709 only) CP.
 * A single FW_BUF_SIZE scratch buffer is reused to hold each
 * decompressed text image in turn.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
	if (rc < 0)
		goto init_cpu_err;

	/* On success zlib_inflate_blob() returns the decompressed length. */
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* load_cpu_fw() decompresses gz_text into this shared buffer. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		fw->text = text;
		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	vfree(text);
	return rc;
}
3036
/* Transition the chip to the requested PCI power state.  D0 restores
 * normal operation; D3hot optionally arms Wake-on-LAN before powering
 * down.  Any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (select D0) and clear any pending
		 * PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any latched wakeup packet events and disable
		 * magic-packet reception.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link stays up at low power, then restore
			 * the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* State value 3 selects D3hot.  5706 A0/A1 are only put
		 * in D3hot when WOL is enabled -- NOTE(review): looks
		 * like a chip-rev workaround; confirm against errata.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3173
3174 static int
3175 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3176 {
3177         u32 val;
3178         int j;
3179
3180         /* Request access to the flash interface. */
3181         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3182         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3183                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3184                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3185                         break;
3186
3187                 udelay(5);
3188         }
3189
3190         if (j >= NVRAM_TIMEOUT_COUNT)
3191                 return -EBUSY;
3192
3193         return 0;
3194 }
3195
3196 static int
3197 bnx2_release_nvram_lock(struct bnx2 *bp)
3198 {
3199         int j;
3200         u32 val;
3201
3202         /* Relinquish nvram interface. */
3203         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3204
3205         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3206                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3207                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3208                         break;
3209
3210                 udelay(5);
3211         }
3212
3213         if (j >= NVRAM_TIMEOUT_COUNT)
3214                 return -EBUSY;
3215
3216         return 0;
3217 }
3218
3219
3220 static int
3221 bnx2_enable_nvram_write(struct bnx2 *bp)
3222 {
3223         u32 val;
3224
3225         val = REG_RD(bp, BNX2_MISC_CFG);
3226         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3227
3228         if (bp->flash_info->flags & BNX2_NV_WREN) {
3229                 int j;
3230
3231                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3232                 REG_WR(bp, BNX2_NVM_COMMAND,
3233                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3234
3235                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3236                         udelay(5);
3237
3238                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3239                         if (val & BNX2_NVM_COMMAND_DONE)
3240                                 break;
3241                 }
3242
3243                 if (j >= NVRAM_TIMEOUT_COUNT)
3244                         return -EBUSY;
3245         }
3246         return 0;
3247 }
3248
3249 static void
3250 bnx2_disable_nvram_write(struct bnx2 *bp)
3251 {
3252         u32 val;
3253
3254         val = REG_RD(bp, BNX2_MISC_CFG);
3255         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3256 }
3257
3258
3259 static void
3260 bnx2_enable_nvram_access(struct bnx2 *bp)
3261 {
3262         u32 val;
3263
3264         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3265         /* Enable both bits, even on read. */
3266         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3267                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3268 }
3269
3270 static void
3271 bnx2_disable_nvram_access(struct bnx2 *bp)
3272 {
3273         u32 val;
3274
3275         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3276         /* Disable both bits, even after read. */
3277         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3278                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3279                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3280 }
3281
3282 static int
3283 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3284 {
3285         u32 cmd;
3286         int j;
3287
3288         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3289                 /* Buffered flash, no erase needed */
3290                 return 0;
3291
3292         /* Build an erase command */
3293         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3294               BNX2_NVM_COMMAND_DOIT;
3295
3296         /* Need to clear DONE bit separately. */
3297         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3298
3299         /* Address of the NVRAM to read from. */
3300         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3301
3302         /* Issue an erase command. */
3303         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3304
3305         /* Wait for completion. */
3306         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3307                 u32 val;
3308
3309                 udelay(5);
3310
3311                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3312                 if (val & BNX2_NVM_COMMAND_DONE)
3313                         break;
3314         }
3315
3316         if (j >= NVRAM_TIMEOUT_COUNT)
3317                 return -EBUSY;
3318
3319         return 0;
3320 }
3321
/* Read one 32-bit word of NVRAM at @offset into the 4-byte buffer
 * @ret_val.  @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST
 * framing bits supplied by the caller.  The caller must already hold
 * the NVRAM lock and have enabled NVRAM access (see bnx2_nvram_read).
 * Returns 0 on success or -EBUSY if the controller does not report
 * DONE within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate a linear byte offset into the device's
		 * page-number/page-offset addressing scheme. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Byte-swap the register value and copy out the
			 * four raw bytes (mirrors the cpu_to_be32 done
			 * on the write side). */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3367
3368
/* Write the 4 bytes at @val to NVRAM at @offset as a single dword.
 * @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits.
 * The caller must hold the NVRAM lock and have enabled NVRAM writes
 * (see bnx2_nvram_write).  Returns 0 on success or -EBUSY if the
 * controller does not report DONE within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate a linear byte offset into the device's
		 * page-number/page-offset addressing scheme. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Byte-swap so the data lands in big-endian order, matching the
	 * be32_to_cpu done on the read side. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3412
/* Identify the attached NVRAM/flash part and set bp->flash_info and
 * bp->flash_size.
 *
 * On 5709 the flash parameters are fixed (flash_5709).  On older chips
 * the strap bits latched in BNX2_NVM_CFG1 are matched against
 * flash_table[]; if the interface has not been reconfigured yet, the
 * matching entry's CFG1-3/WRITE1 values are also programmed into the
 * controller.  Returns 0 on success, -ENODEV if no table entry
 * matches, or an error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 has a single known flash configuration. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 indicates the flash interface was already reconfigured
	 * (e.g. by the bootcode), so match on config1 instead of straps. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared firmware config; fall back
	 * to the flash table entry's total size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3495
3496 static int
3497 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3498                 int buf_size)
3499 {
3500         int rc = 0;
3501         u32 cmd_flags, offset32, len32, extra;
3502
3503         if (buf_size == 0)
3504                 return 0;
3505
3506         /* Request access to the flash interface. */
3507         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3508                 return rc;
3509
3510         /* Enable access to flash interface */
3511         bnx2_enable_nvram_access(bp);
3512
3513         len32 = buf_size;
3514         offset32 = offset;
3515         extra = 0;
3516
3517         cmd_flags = 0;
3518
3519         if (offset32 & 3) {
3520                 u8 buf[4];
3521                 u32 pre_len;
3522
3523                 offset32 &= ~3;
3524                 pre_len = 4 - (offset & 3);
3525
3526                 if (pre_len >= len32) {
3527                         pre_len = len32;
3528                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3529                                     BNX2_NVM_COMMAND_LAST;
3530                 }
3531                 else {
3532                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3533                 }
3534
3535                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3536
3537                 if (rc)
3538                         return rc;
3539
3540                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3541
3542                 offset32 += 4;
3543                 ret_buf += pre_len;
3544                 len32 -= pre_len;
3545         }
3546         if (len32 & 3) {
3547                 extra = 4 - (len32 & 3);
3548                 len32 = (len32 + 4) & ~3;
3549         }
3550
3551         if (len32 == 4) {
3552                 u8 buf[4];
3553
3554                 if (cmd_flags)
3555                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3556                 else
3557                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3558                                     BNX2_NVM_COMMAND_LAST;
3559
3560                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3561
3562                 memcpy(ret_buf, buf, 4 - extra);
3563         }
3564         else if (len32 > 0) {
3565                 u8 buf[4];
3566
3567                 /* Read the first word. */
3568                 if (cmd_flags)
3569                         cmd_flags = 0;
3570                 else
3571                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3572
3573                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3574
3575                 /* Advance to the next dword. */
3576                 offset32 += 4;
3577                 ret_buf += 4;
3578                 len32 -= 4;
3579
3580                 while (len32 > 4 && rc == 0) {
3581                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3582
3583                         /* Advance to the next dword. */
3584                         offset32 += 4;
3585                         ret_buf += 4;
3586                         len32 -= 4;
3587                 }
3588
3589                 if (rc)
3590                         return rc;
3591
3592                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3593                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3594
3595                 memcpy(ret_buf, buf, 4 - extra);
3596         }
3597
3598         /* Disable access to flash interface */
3599         bnx2_disable_nvram_access(bp);
3600
3601         bnx2_release_nvram_lock(bp);
3602
3603         return rc;
3604 }
3605
3606 static int
3607 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3608                 int buf_size)
3609 {
3610         u32 written, offset32, len32;
3611         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3612         int rc = 0;
3613         int align_start, align_end;
3614
3615         buf = data_buf;
3616         offset32 = offset;
3617         len32 = buf_size;
3618         align_start = align_end = 0;
3619
3620         if ((align_start = (offset32 & 3))) {
3621                 offset32 &= ~3;
3622                 len32 += align_start;
3623                 if (len32 < 4)
3624                         len32 = 4;
3625                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3626                         return rc;
3627         }
3628
3629         if (len32 & 3) {
3630                 align_end = 4 - (len32 & 3);
3631                 len32 += align_end;
3632                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3633                         return rc;
3634         }
3635
3636         if (align_start || align_end) {
3637                 align_buf = kmalloc(len32, GFP_KERNEL);
3638                 if (align_buf == NULL)
3639                         return -ENOMEM;
3640                 if (align_start) {
3641                         memcpy(align_buf, start, 4);
3642                 }
3643                 if (align_end) {
3644                         memcpy(align_buf + len32 - 4, end, 4);
3645                 }
3646                 memcpy(align_buf + align_start, data_buf, buf_size);
3647                 buf = align_buf;
3648         }
3649
3650         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3651                 flash_buffer = kmalloc(264, GFP_KERNEL);
3652                 if (flash_buffer == NULL) {
3653                         rc = -ENOMEM;
3654                         goto nvram_write_end;
3655                 }
3656         }
3657
3658         written = 0;
3659         while ((written < len32) && (rc == 0)) {
3660                 u32 page_start, page_end, data_start, data_end;
3661                 u32 addr, cmd_flags;
3662                 int i;
3663
3664                 /* Find the page_start addr */
3665                 page_start = offset32 + written;
3666                 page_start -= (page_start % bp->flash_info->page_size);
3667                 /* Find the page_end addr */
3668                 page_end = page_start + bp->flash_info->page_size;
3669                 /* Find the data_start addr */
3670                 data_start = (written == 0) ? offset32 : page_start;
3671                 /* Find the data_end addr */
3672                 data_end = (page_end > offset32 + len32) ?
3673                         (offset32 + len32) : page_end;
3674
3675                 /* Request access to the flash interface. */
3676                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3677                         goto nvram_write_end;
3678
3679                 /* Enable access to flash interface */
3680                 bnx2_enable_nvram_access(bp);
3681
3682                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3683                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3684                         int j;
3685
3686                         /* Read the whole page into the buffer
3687                          * (non-buffer flash only) */
3688                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3689                                 if (j == (bp->flash_info->page_size - 4)) {
3690                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3691                                 }
3692                                 rc = bnx2_nvram_read_dword(bp,
3693                                         page_start + j,
3694                                         &flash_buffer[j],
3695                                         cmd_flags);
3696
3697                                 if (rc)
3698                                         goto nvram_write_end;
3699
3700                                 cmd_flags = 0;
3701                         }
3702                 }
3703
3704                 /* Enable writes to flash interface (unlock write-protect) */
3705                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3706                         goto nvram_write_end;
3707
3708                 /* Loop to write back the buffer data from page_start to
3709                  * data_start */
3710                 i = 0;
3711                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3712                         /* Erase the page */
3713                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3714                                 goto nvram_write_end;
3715
3716                         /* Re-enable the write again for the actual write */
3717                         bnx2_enable_nvram_write(bp);
3718
3719                         for (addr = page_start; addr < data_start;
3720                                 addr += 4, i += 4) {
3721
3722                                 rc = bnx2_nvram_write_dword(bp, addr,
3723                                         &flash_buffer[i], cmd_flags);
3724
3725                                 if (rc != 0)
3726                                         goto nvram_write_end;
3727
3728                                 cmd_flags = 0;
3729                         }
3730                 }
3731
3732                 /* Loop to write the new data from data_start to data_end */
3733                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3734                         if ((addr == page_end - 4) ||
3735                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3736                                  (addr == data_end - 4))) {
3737
3738                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3739                         }
3740                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3741                                 cmd_flags);
3742
3743                         if (rc != 0)
3744                                 goto nvram_write_end;
3745
3746                         cmd_flags = 0;
3747                         buf += 4;
3748                 }
3749
3750                 /* Loop to write back the buffer data from data_end
3751                  * to page_end */
3752                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3753                         for (addr = data_end; addr < page_end;
3754                                 addr += 4, i += 4) {
3755
3756                                 if (addr == page_end-4) {
3757                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3758                                 }
3759                                 rc = bnx2_nvram_write_dword(bp, addr,
3760                                         &flash_buffer[i], cmd_flags);
3761
3762                                 if (rc != 0)
3763                                         goto nvram_write_end;
3764
3765                                 cmd_flags = 0;
3766                         }
3767                 }
3768
3769                 /* Disable writes to flash interface (lock write-protect) */
3770                 bnx2_disable_nvram_write(bp);
3771
3772                 /* Disable access to flash interface */
3773                 bnx2_disable_nvram_access(bp);
3774                 bnx2_release_nvram_lock(bp);
3775
3776                 /* Increment written */
3777                 written += data_end - data_start;
3778         }
3779
3780 nvram_write_end:
3781         kfree(flash_buffer);
3782         kfree(align_buf);
3783         return rc;
3784 }
3785
3786 static void
3787 bnx2_init_remote_phy(struct bnx2 *bp)
3788 {
3789         u32 val;
3790
3791         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3792         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3793                 return;
3794
3795         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3796         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3797                 return;
3798
3799         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3800                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3801
3802                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3803                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3804                         bp->phy_port = PORT_FIBRE;
3805                 else
3806                         bp->phy_port = PORT_TP;
3807
3808                 if (netif_running(bp->dev)) {
3809                         u32 sig;
3810
3811                         if (val & BNX2_LINK_STATUS_LINK_UP) {
3812                                 bp->link_up = 1;
3813                                 netif_carrier_on(bp->dev);
3814                         } else {
3815                                 bp->link_up = 0;
3816                                 netif_carrier_off(bp->dev);
3817                         }
3818                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3819                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3820                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3821                                    sig);
3822                 }
3823         }
3824 }
3825
/* Soft-reset the chip, handshaking with the bootcode firmware before
 * and after.  @reset_code is the BNX2_DRV_MSG_CODE_* reason merged
 * into the firmware sync messages.  Returns 0 on success, -EBUSY if
 * the reset never completes, -ENODEV on an endian-configuration
 * failure, or an error from the firmware handshake / rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; the
		 * read-back flushes the posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Re-establish register window / word-swap config lost
		 * across the reset, via PCI config space. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the PCICFG misc-config
		 * register's core-reset request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY support; the firmware may report a
	 * different port type after reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3927
/* Bring the chip up after a reset: program DMA configuration, context
 * memory, internal CPU firmware, NVRAM info, MAC address, MTU, and all
 * host-coalescing parameters, then tell the firmware initialization is
 * done.  Returns 0 on success or a negative errno from context/CPU
 * init or the firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA config bits carried over from the
	 * vendor reference code; confirm meaning against the datasheet. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: restrict TDMA to a single DMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X relaxed ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context/CPU initialization. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the internal RISC CPU firmware. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size; A0/A1 5709 need the halt-disable
	 * workaround. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Set the kernel-bypass window to end where the contexts end. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and timers; each register packs
	 * the interrupt-context value in the high half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 requires an explicit DMA enable in the new core
		 * control register. */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read-back flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW pokes. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4105
4106 static void
4107 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4108 {
4109         u32 val, offset0, offset1, offset2, offset3;
4110
4111         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4112                 offset0 = BNX2_L2CTX_TYPE_XI;
4113                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4114                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4115                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4116         } else {
4117                 offset0 = BNX2_L2CTX_TYPE;
4118                 offset1 = BNX2_L2CTX_CMD_TYPE;
4119                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4120                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4121         }
4122         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4123         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4124
4125         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4126         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4127
4128         val = (u64) bp->tx_desc_mapping >> 32;
4129         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4130
4131         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4132         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4133 }
4134
4135 static void
4136 bnx2_init_tx_ring(struct bnx2 *bp)
4137 {
4138         struct tx_bd *txbd;
4139         u32 cid;
4140
4141         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4142
4143         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4144
4145         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4146         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4147
4148         bp->tx_prod = 0;
4149         bp->tx_cons = 0;
4150         bp->hw_tx_cons = 0;
4151         bp->tx_prod_bseq = 0;
4152
4153         cid = TX_CID;
4154         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4155         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4156
4157         bnx2_init_tx_context(bp, cid);
4158 }
4159
/* Initialize the RX BD ring(s): size the receive buffers for the current
 * MTU, link the BD pages into a circular chain, program the RX context,
 * and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                /* Mark every usable BD on this page as a single-buffer
                 * (START|END) descriptor of the full buffer size.
                 */
                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* The last BD of each page chains to the next page; the
                 * final page points back to page 0, closing the ring.
                 */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Program the L2 RX context: BD-chain type plus the physical
         * address of the first BD page.
         */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Best-effort pre-fill of the ring with skbs; stop early if an
         * allocation fails.
         */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Tell the chip how many BDs are now available. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4218
4219 static void
4220 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4221 {
4222         u32 num_rings, max;
4223
4224         bp->rx_ring_size = size;
4225         num_rings = 1;
4226         while (size > MAX_RX_DESC_CNT) {
4227                 size -= MAX_RX_DESC_CNT;
4228                 num_rings++;
4229         }
4230         /* round to next power of 2 */
4231         max = MAX_RX_RINGS;
4232         while ((max & num_rings) == 0)
4233                 max >>= 1;
4234
4235         if (num_rings != max)
4236                 max <<= 1;
4237
4238         bp->rx_max_ring = max;
4239         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4240 }
4241
/* Unmap and free every skb still held in the TX ring (e.g. on shutdown
 * or reset).  Each packet occupies one linear BD plus one BD per skb
 * fragment; the loop strides over a whole packet at a time.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                /* Empty slot: advance one entry. */
                if (skb == NULL) {
                        i++;
                        continue;
                }

                /* First BD of the packet maps the linear skb data. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                /* The next nr_frags BDs map the skb page fragments. */
                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                /* Skip past this packet's BDs: 1 linear + nr_frags. */
                i += j + 1;
        }

}
4278
4279 static void
4280 bnx2_free_rx_skbs(struct bnx2 *bp)
4281 {
4282         int i;
4283
4284         if (bp->rx_buf_ring == NULL)
4285                 return;
4286
4287         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4288                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4289                 struct sk_buff *skb = rx_buf->skb;
4290
4291                 if (skb == NULL)
4292                         continue;
4293
4294                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4295                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4296
4297                 rx_buf->skb = NULL;
4298
4299                 dev_kfree_skb(skb);
4300         }
4301 }
4302
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4309
4310 static int
4311 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4312 {
4313         int rc;
4314
4315         rc = bnx2_reset_chip(bp, reset_code);
4316         bnx2_free_skbs(bp);
4317         if (rc)
4318                 return rc;
4319
4320         if ((rc = bnx2_init_chip(bp)) != 0)
4321                 return rc;
4322
4323         bnx2_init_tx_ring(bp);
4324         bnx2_init_rx_ring(bp);
4325         return 0;
4326 }
4327
4328 static int
4329 bnx2_init_nic(struct bnx2 *bp)
4330 {
4331         int rc;
4332
4333         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4334                 return rc;
4335
4336         spin_lock_bh(&bp->phy_lock);
4337         bnx2_init_phy(bp);
4338         bnx2_set_link(bp);
4339         spin_unlock_bh(&bp->phy_lock);
4340         return 0;
4341 }
4342
/* ethtool self-test: verify register read/write behavior.  Each table
 * entry lists a register offset, flags (BNX2_FL_NOT_5709 marks
 * registers absent on the 5709), a mask of writable bits (rw_mask) and
 * a mask of read-only bits (ro_mask).  For each register the test
 * writes all-0s and all-1s, checking that writable bits take the
 * written value while read-only bits keep theirs; the original value
 * is restored in every case.  Returns 0 on success, -ENODEV on the
 * first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* 0xffff terminates the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                /* Skip registers that do not exist on the 5709. */
                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write 0: all writable bits must read back 0 and all
                 * read-only bits must keep their saved value.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-1s: every writable bit must read back 1 and
                 * read-only bits must again be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the original value before reporting failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
4513
4514 static int
4515 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4516 {
4517         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4518                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4519         int i;
4520
4521         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4522                 u32 offset;
4523
4524                 for (offset = 0; offset < size; offset += 4) {
4525
4526                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4527
4528                         if (REG_RD_IND(bp, start + offset) !=
4529                                 test_pattern[i]) {
4530                                 return -ENODEV;
4531                         }
4532                 }
4533         }
4534         return 0;
4535 }
4536
4537 static int
4538 bnx2_test_memory(struct bnx2 *bp)
4539 {
4540         int ret = 0;
4541         int i;
4542         static struct mem_entry {
4543                 u32   offset;
4544                 u32   len;
4545         } mem_tbl_5706[] = {
4546                 { 0x60000,  0x4000 },
4547                 { 0xa0000,  0x3000 },
4548                 { 0xe0000,  0x4000 },
4549                 { 0x120000, 0x4000 },
4550                 { 0x1a0000, 0x4000 },
4551                 { 0x160000, 0x4000 },
4552                 { 0xffffffff, 0    },
4553         },
4554         mem_tbl_5709[] = {
4555                 { 0x60000,  0x4000 },
4556                 { 0xa0000,  0x3000 },
4557                 { 0xe0000,  0x4000 },
4558                 { 0x120000, 0x4000 },
4559                 { 0x1a0000, 0x4000 },
4560                 { 0xffffffff, 0    },
4561         };
4562         struct mem_entry *mem_tbl;
4563
4564         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4565                 mem_tbl = mem_tbl_5709;
4566         else
4567                 mem_tbl = mem_tbl_5706;
4568
4569         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4570                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4571                         mem_tbl[i].len)) != 0) {
4572                         return ret;
4573                 }
4574         }
4575
4576         return ret;
4577 }
4578
4579 #define BNX2_MAC_LOOPBACK       0
4580 #define BNX2_PHY_LOOPBACK       1
4581
/* Loopback self-test: transmit one 1514-byte frame in MAC or PHY
 * loopback mode and verify it is received back intact.  Returns 0 on
 * success, -ENODEV on any mismatch, -EINVAL for an unknown mode,
 * -ENOMEM if the test skb cannot be allocated.  Caller must have reset
 * the NIC first so the ring indices start at 0.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                /* Remote-PHY setups cannot do local PHY loopback; report
                 * success rather than failing the self-test.
                 */
                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: our MAC as destination, zeroed rest of
         * header, then an incrementing byte pattern in the payload.
         */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a status block update and note the RX consumer index
         * before transmitting.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Hand the frame to the chip: fill one TX BD and ring the
         * doorbell mailboxes.
         */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        udelay(100);

        /* Force another status block update so the TX/RX indices below
         * are current.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been fully transmitted... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        /* NOTE(review): rx_start_idx indexes the ring directly; this
         * relies on the caller having just reset the NIC so indices
         * start from 0.
         */
        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject frames the chip flagged with any receive error. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length must match (minus the 4-byte CRC appended on RX). */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload byte pattern survived the round trip. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4703
4704 #define BNX2_MAC_LOOPBACK_FAILED        1
4705 #define BNX2_PHY_LOOPBACK_FAILED        2
4706 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4707                                          BNX2_PHY_LOOPBACK_FAILED)
4708
4709 static int
4710 bnx2_test_loopback(struct bnx2 *bp)
4711 {
4712         int rc = 0;
4713
4714         if (!netif_running(bp->dev))
4715                 return BNX2_LOOPBACK_FAILED;
4716
4717         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4718         spin_lock_bh(&bp->phy_lock);
4719         bnx2_init_phy(bp);
4720         spin_unlock_bh(&bp->phy_lock);
4721         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4722                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4723         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4724                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4725         return rc;
4726 }
4727
4728 #define NVRAM_SIZE 0x200
4729 #define CRC32_RESIDUAL 0xdebb20e3
4730
4731 static int
4732 bnx2_test_nvram(struct bnx2 *bp)
4733 {
4734         u32 buf[NVRAM_SIZE / 4];
4735         u8 *data = (u8 *) buf;
4736         int rc = 0;
4737         u32 magic, csum;
4738
4739         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4740                 goto test_nvram_done;
4741
4742         magic = be32_to_cpu(buf[0]);
4743         if (magic != 0x669955aa) {
4744                 rc = -ENODEV;
4745                 goto test_nvram_done;
4746         }
4747
4748         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4749                 goto test_nvram_done;
4750
4751         csum = ether_crc_le(0x100, data);
4752         if (csum != CRC32_RESIDUAL) {
4753                 rc = -ENODEV;
4754                 goto test_nvram_done;
4755         }
4756
4757         csum = ether_crc_le(0x100, data + 0x100);
4758         if (csum != CRC32_RESIDUAL) {
4759                 rc = -ENODEV;
4760         }
4761
4762 test_nvram_done:
4763         return rc;
4764 }
4765
4766 static int
4767 bnx2_test_link(struct bnx2 *bp)
4768 {
4769         u32 bmsr;
4770
4771         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4772                 if (bp->link_up)
4773                         return 0;
4774                 return -ENODEV;
4775         }
4776         spin_lock_bh(&bp->phy_lock);
4777         bnx2_enable_bmsr1(bp);
4778         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4779         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4780         bnx2_disable_bmsr1(bp);
4781         spin_unlock_bh(&bp->phy_lock);
4782
4783         if (bmsr & BMSR_LSTATUS) {
4784                 return 0;
4785         }
4786         return -ENODEV;
4787 }
4788
4789 static int
4790 bnx2_test_intr(struct bnx2 *bp)
4791 {
4792         int i;
4793         u16 status_idx;
4794
4795         if (!netif_running(bp->dev))
4796                 return -ENODEV;
4797
4798         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4799
4800         /* This register is not touched during run-time. */
4801         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4802         REG_RD(bp, BNX2_HC_COMMAND);
4803
4804         for (i = 0; i < 10; i++) {
4805                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4806                         status_idx) {
4807
4808                         break;
4809                 }
4810
4811                 msleep_interruptible(10);
4812         }
4813         if (i < 10)
4814                 return 0;
4815
4816         return -ENODEV;
4817 }
4818
/* Periodic 5706 SerDes state machine (called from bnx2_timer).
 *
 * While autoneg has not brought the link up, look for a signal with no
 * autoneg configuration from the partner and, if seen, force 1Gb full
 * duplex ("parallel detect").  Conversely, once link is up via parallel
 * detect and the partner starts sending config, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        /* A pending countdown delays any action for that many ticks. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* NOTE(review): 0x1c/0x17/0x15 are
                         * vendor-specific shadow registers; 0x15 is read
                         * twice, presumably because it is latched.
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Signal present but no autoneg config:
                                 * force 1Gb full duplex.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link is up via parallel detect; if the partner now
                 * sends CONFIG, switch back to autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4873
/* Periodic 5708 SerDes state machine (called from bnx2_timer).
 *
 * On 2.5G-capable links that fail to autonegotiate, alternate between
 * forcing 2.5G and re-enabling autoneg until the link comes up.  Not
 * used with a remote PHY or on links without 2.5G capability.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        /* A pending countdown delays any action for that many ticks. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg isn't linking: try forced 2.5G and
                         * re-check sooner than the normal interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced mode didn't link either: go back to
                         * autoneg and give it two ticks to settle.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4906
/* Driver heartbeat timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, applies a 5708 statistics workaround, runs
 * the per-chip SerDes state machine, and re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are (temporarily) disabled; just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        /* current_interval may have been adjusted by the SerDes code. */
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4937
4938 static int
4939 bnx2_request_irq(struct bnx2 *bp)
4940 {
4941         struct net_device *dev = bp->dev;
4942         int rc = 0;
4943
4944         if (bp->flags & USING_MSI_FLAG) {
4945                 irq_handler_t   fn = bnx2_msi;
4946
4947                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4948                         fn = bnx2_msi_1shot;
4949
4950                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4951         } else
4952                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4953                                  IRQF_SHARED, dev->name, dev);
4954         return rc;
4955 }
4956
4957 static void
4958 bnx2_free_irq(struct bnx2 *bp)
4959 {
4960         struct net_device *dev = bp->dev;
4961
4962         if (bp->flags & USING_MSI_FLAG) {
4963                 free_irq(bp->pdev->irq, dev);
4964                 pci_disable_msi(bp->pdev);
4965                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4966         } else
4967                 free_irq(bp->pdev->irq, dev);
4968 }
4969
/* Called with rtnl_lock */
/* net_device open(): power up the chip, allocate ring memory, enable
 * NAPI, set up the interrupt (preferring MSI), initialize the NIC, and
 * verify that MSI actually delivers interrupts — falling back to INTx
 * if not.  Returns 0 on success or a negative errno, with all acquired
 * resources released on every failure path.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        napi_enable(&bp->napi);

        /* Prefer MSI when the device supports it and the user has not
         * disabled it; 5709 additionally supports one-shot MSI.
         */
        if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bp->flags |= ONE_SHOT_MSI_FLAG;
                }
        }
        rc = bnx2_request_irq(bp);

        if (rc) {
                napi_disable(&bp->napi);
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                napi_disable(&bp->napi);
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        /* Clears USING_MSI_FLAG, so the retry below uses
                         * INTx (see bnx2_free_irq/bnx2_request_irq).
                         */
                        bnx2_free_irq(bp);

                        rc = bnx2_init_nic(bp);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                napi_disable(&bp->napi);
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        /* Re-checked on purpose: the fallback above may have cleared
         * USING_MSI_FLAG, so this only logs when MSI survived the test.
         */
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
5056
/* Deferred chip reset, scheduled by bnx2_tx_timeout().  Runs in
 * process context from the shared workqueue.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed after the reset was scheduled. */
	if (!netif_running(bp->dev))
		return;

	/* in_reset_task lets bnx2_close() poll-wait for us instead of
	 * calling flush_scheduled_work(), which could deadlock under
	 * rtnl_lock (see the comment in bnx2_close()).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold interrupts off until bnx2_netif_start() re-enables them. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5074
/* Transmit watchdog: defer the chip reset to process context. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5083
5084 #ifdef BCM_VLAN
5085 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the NIC while the VLAN group pointer is swapped, then
	 * reprogram the RX filters for the new VLAN configuration.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5098 #endif
5099
5100 /* Called with netif_tx_lock.
5101  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5102  * netif_wake_queue().
5103  */
/* Main transmit routine.  Called with netif_tx_lock held (see above).
 * Builds one tx buffer descriptor for the linear part of the skb plus
 * one per page fragment, encoding checksum-offload, VLAN and TSO
 * parameters into the BD flags, then rings the doorbell.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should already be stopped before the ring gets this
	 * full (see the stop/wake logic at the end); treat it as a
	 * driver bug but fail gracefully with TX_BUSY.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: the TCP header offset (relative
			 * to a plain ipv6hdr + ethernet header) is split
			 * across dedicated bit fields in the flags word
			 * and the mss field.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: the IP/TCP headers are rewritten
			 * below, so a cloned header must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed tot_len/checksums for per-segment fixup:
			 * IP checksum zeroed, TCP checksum set to the
			 * pseudo-header sum.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Combined IP + TCP option length, in 32-bit words. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part and fill the first BD (START flag). */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment; the skb pointer lives
	 * only in the first sw_bd, the rest just record DMA mappings.
	 */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Whichever BD was written last gets the END flag. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Keep the MMIO doorbell writes ordered before the tx lock is
	 * released (see Documentation/memory-barriers.txt on mmiowb).
	 */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if a maximally-fragmented skb might not fit;
	 * re-check afterwards in case bnx2_tx_int() freed descriptors
	 * in the meantime and would not wake a freshly-stopped queue.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5238
5239 /* Called with rtnl_lock */
/* Bring the interface down.  Called with rtnl_lock held. */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code that matches the Wake-on-LAN
	 * configuration so the chip is left in the right wake state.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5271
/* Combine the hi/lo halves of a 64-bit statistics-block counter.
 * On 32-bit hosts only the low word fits in unsigned long, so
 * GET_NET_STATS degrades to the low half there.  The expansions are
 * fully parenthesized so they are safe in any expression context
 * (the previous GET_NET_STATS64 expanded to a bare `a + b`).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5284
/* Fill in the standard net_device_stats from the DMA'd hardware
 * statistics block.  64-bit counters are folded with GET_NET_STATS
 * (full value on 64-bit hosts, low word on 32-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No stats block yet (e.g. before allocation) — return the
	 * last cached values unchanged.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the categories computed just above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0 —
	 * presumably the counter is unreliable on those revisions.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the firmware dropped as missed RX packets. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5360
5361 /* All ethtool functions called with rtnl_lock */
5362
/* ethtool get_settings.  Reports the supported/advertised link modes
 * for the fitted PHY type and, under phy_lock, a coherent snapshot of
 * the current autoneg/speed/duplex state.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable device can drive either medium; otherwise
	 * the capability follows the current port type.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Snapshot the live link state consistently under phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful with link up; -1 means unknown. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5421
/* ethtool set_settings.  Validates the requested port/autoneg/speed
 * combination against the PHY's capabilities, then commits it and
 * reprograms the PHY.  All checks run under phy_lock; on any invalid
 * combination the function bails out with -EINVAL and no state change.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with a remote-capable PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires the capable PHY and a fibre port. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single valid speed requested: advertise
			 * everything the selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper disallows >=1G
		 * forced speeds.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed — commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5504
/* ethtool get_drvinfo: report driver name/version, PCI bus address
 * and bootcode version.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Sources are short driver-controlled strings assumed to fit
	 * the fixed-size ethtool_drvinfo fields.
	 * NOTE(review): bp->fw_version originates from the device —
	 * confirm it is always NUL-terminated and shorter than
	 * info->fw_version before trusting strcpy here.
	 */
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
5515
5516 #define BNX2_REGDUMP_LEN                (32 * 1024)
5517
/* ethtool get_regs_len: size of the register dump produced by
 * bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5523
5524 static void
5525 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5526 {
5527         u32 *p = _p, i, offset;
5528         u8 *orig_p = _p;
5529         struct bnx2 *bp = netdev_priv(dev);
5530         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5531                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5532                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5533                                  0x1040, 0x1048, 0x1080, 0x10a4,
5534                                  0x1400, 0x1490, 0x1498, 0x14f0,
5535                                  0x1500, 0x155c, 0x1580, 0x15dc,
5536                                  0x1600, 0x1658, 0x1680, 0x16d8,
5537                                  0x1800, 0x1820, 0x1840, 0x1854,
5538                                  0x1880, 0x1894, 0x1900, 0x1984,
5539                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5540                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5541                                  0x2000, 0x2030, 0x23c0, 0x2400,
5542                                  0x2800, 0x2820, 0x2830, 0x2850,
5543                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5544                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5545                                  0x4080, 0x4090, 0x43c0, 0x4458,
5546                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5547                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5548                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5549                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5550                                  0x6800, 0x6848, 0x684c, 0x6860,
5551                                  0x6888, 0x6910, 0x8000 };
5552
5553         regs->version = 0;
5554
5555         memset(p, 0, BNX2_REGDUMP_LEN);
5556
5557         if (!netif_running(bp->dev))
5558                 return;
5559
5560         i = 0;
5561         offset = reg_boundaries[0];
5562         p += offset;
5563         while (offset < BNX2_REGDUMP_LEN) {
5564                 *p++ = REG_RD(bp, offset);
5565                 offset += 4;
5566                 if (offset == reg_boundaries[i + 1]) {
5567                         offset = reg_boundaries[i + 2];
5568                         p = (u32 *) (orig_p + offset);
5569                         i += 2;
5570                 }
5571         }
5572 }
5573
5574 static void
5575 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5576 {
5577         struct bnx2 *bp = netdev_priv(dev);
5578
5579         if (bp->flags & NO_WOL_FLAG) {
5580                 wol->supported = 0;
5581                 wol->wolopts = 0;
5582         }
5583         else {
5584                 wol->supported = WAKE_MAGIC;
5585                 if (bp->wol)
5586                         wol->wolopts = WAKE_MAGIC;
5587                 else
5588                         wol->wolopts = 0;
5589         }
5590         memset(&wol->sopass, 0, sizeof(wol->sopass));
5591 }
5592
5593 static int
5594 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5595 {
5596         struct bnx2 *bp = netdev_priv(dev);
5597
5598         if (wol->wolopts & ~WAKE_MAGIC)
5599                 return -EINVAL;
5600
5601         if (wol->wolopts & WAKE_MAGIC) {
5602                 if (bp->flags & NO_WOL_FLAG)
5603                         return -EINVAL;
5604
5605                 bp->wol = 1;
5606         }
5607         else {
5608                 bp->wol = 0;
5609         }
5610         return 0;
5611 }
5612
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  Remote PHYs are handled by re-running the
 * remote setup; local serdes PHYs first force the link down so the
 * peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the whole restart to the remote setup. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Briefly put the PHY in loopback (dropping phy_lock
		 * around the sleep), then arm the serdes autoneg
		 * timeout so the timer can intervene if negotiation
		 * stalls.
		 */
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5655
5656 static int
5657 bnx2_get_eeprom_len(struct net_device *dev)
5658 {
5659         struct bnx2 *bp = netdev_priv(dev);
5660
5661         if (bp->flash_info == NULL)
5662                 return 0;
5663
5664         return (int) bp->flash_size;
5665 }
5666
5667 static int
5668 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5669                 u8 *eebuf)
5670 {
5671         struct bnx2 *bp = netdev_priv(dev);
5672         int rc;
5673
5674         /* parameters already validated in ethtool_get_eeprom */
5675
5676         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5677
5678         return rc;
5679 }
5680
5681 static int
5682 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5683                 u8 *eebuf)
5684 {
5685         struct bnx2 *bp = netdev_priv(dev);
5686         int rc;
5687
5688         /* parameters already validated in ethtool_set_eeprom */
5689
5690         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5691
5692         return rc;
5693 }
5694
/* ethtool get_coalesce: report the current interrupt-coalescing
 * parameters (tick timers and frame-count trip points, plus the
 * statistics block update interval).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5716
/* ethtool set_coalesce: clamp the requested values to what the host
 * coalescing registers can hold (0x3ff for tick timers, 0xff for
 * frame trip counts), then restart the NIC if it is running so the
 * new parameters take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* 5708 only supports stats updates off or once per second —
	 * presumably a chip restriction; anything else is coerced to
	 * one second.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Apply immediately: stop, re-init and restart the NIC. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5765
/* ethtool get_ringparam: report current and maximum RX/TX ring
 * sizes.  No mini or jumbo rings on this hardware.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
5782
/* ethtool set_ringparam: resize the RX/TX rings.  If the device is
 * running it is torn down, the new sizes recorded, and everything
 * reallocated and restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX must keep more than MAX_SKB_FRAGS descriptors so a
	 * maximally fragmented skb can always fit.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure we return with the
		 * interface still marked running but with the chip reset
		 * and all rings/skbs freed — the device is unusable until
		 * closed.  Should probably close the device on this path;
		 * confirm against later upstream fixes.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5816
5817 static void
5818 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5819 {
5820         struct bnx2 *bp = netdev_priv(dev);
5821
5822         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5823         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5824         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5825 }
5826
5827 static int
5828 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5829 {
5830         struct bnx2 *bp = netdev_priv(dev);
5831
5832         bp->req_flow_ctrl = 0;
5833         if (epause->rx_pause)
5834                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5835         if (epause->tx_pause)
5836                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5837
5838         if (epause->autoneg) {
5839                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5840         }
5841         else {
5842                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5843         }
5844
5845         spin_lock_bh(&bp->phy_lock);
5846
5847         bnx2_setup_phy(bp, bp->phy_port);
5848
5849         spin_unlock_bh(&bp->phy_lock);
5850
5851         return 0;
5852 }
5853
5854 static u32
5855 bnx2_get_rx_csum(struct net_device *dev)
5856 {
5857         struct bnx2 *bp = netdev_priv(dev);
5858
5859         return bp->rx_csum;
5860 }
5861
5862 static int
5863 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5864 {
5865         struct bnx2 *bp = netdev_priv(dev);
5866
5867         bp->rx_csum = data;
5868         return 0;
5869 }
5870
5871 static int
5872 bnx2_set_tso(struct net_device *dev, u32 data)
5873 {
5874         struct bnx2 *bp = netdev_priv(dev);
5875
5876         if (data) {
5877                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5878                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5879                         dev->features |= NETIF_F_TSO6;
5880         } else
5881                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5882                                    NETIF_F_TSO_ECN);
5883         return 0;
5884 }
5885
5886 #define BNX2_NUM_STATS 46
5887
/* Display names for the ethtool statistics.  The order here must
 * stay in lockstep with bnx2_stats_offset_arr — both are indexed
 * 0..BNX2_NUM_STATS-1 entry for entry.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5938
/* Offset, in 32-bit words, of a named counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware statistics-block word offsets, one entry per name in
 * bnx2_stats_str_arr (same order).  The _hi fields are the upper
 * halves of 64-bit counters; the stats length tables below record
 * whether 8 or 4 bytes are read at each offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5989
/* Counter widths in bytes for 5706 A0-A2 and 5708 A0 chips, indexed
 * like bnx2_stats_offset_arr: 8 = 64-bit counter, 4 = 32-bit counter,
 * 0 = counter skipped.  stat_IfHCInBadOctets (index 1) and
 * stat_Dot3StatsCarrierSenseErrors (index 11) are skipped because of
 * errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6000
/* Counter widths in bytes for all other chip revisions, indexed like
 * bnx2_stats_offset_arr: 8 = 64-bit, 4 = 32-bit, 0 = skipped.  Only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6008
#define BNX2_NUM_TESTS 6

/* Self-test names, reported via get_strings(ETH_SS_TEST).  The order
 * must match the buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6021
6022 static int
6023 bnx2_get_sset_count(struct net_device *dev, int sset)
6024 {
6025         switch (sset) {
6026         case ETH_SS_TEST:
6027                 return BNX2_NUM_TESTS;
6028         case ETH_SS_STATS:
6029                 return BNX2_NUM_STATS;
6030         default:
6031                 return -EOPNOTSUPP;
6032         }
6033 }
6034
6035 static void
6036 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6037 {
6038         struct bnx2 *bp = netdev_priv(dev);
6039
6040         memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6041         if (etest->flags & ETH_TEST_FL_OFFLINE) {
6042                 int i;
6043
6044                 bnx2_netif_stop(bp);
6045                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6046                 bnx2_free_skbs(bp);
6047
6048                 if (bnx2_test_registers(bp) != 0) {
6049                         buf[0] = 1;
6050                         etest->flags |= ETH_TEST_FL_FAILED;
6051                 }
6052                 if (bnx2_test_memory(bp) != 0) {
6053                         buf[1] = 1;
6054                         etest->flags |= ETH_TEST_FL_FAILED;
6055                 }
6056                 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6057                         etest->flags |= ETH_TEST_FL_FAILED;
6058
6059                 if (!netif_running(bp->dev)) {
6060                         bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6061                 }
6062                 else {
6063                         bnx2_init_nic(bp);
6064                         bnx2_netif_start(bp);
6065                 }
6066
6067                 /* wait for link up */
6068                 for (i = 0; i < 7; i++) {
6069                         if (bp->link_up)
6070                                 break;
6071                         msleep_interruptible(1000);
6072                 }
6073         }
6074
6075         if (bnx2_test_nvram(bp) != 0) {
6076                 buf[3] = 1;
6077                 etest->flags |= ETH_TEST_FL_FAILED;
6078         }
6079         if (bnx2_test_intr(bp) != 0) {
6080                 buf[4] = 1;
6081                 etest->flags |= ETH_TEST_FL_FAILED;
6082         }
6083
6084         if (bnx2_test_link(bp) != 0) {
6085                 buf[5] = 1;
6086                 etest->flags |= ETH_TEST_FL_FAILED;
6087
6088         }
6089 }
6090
6091 static void
6092 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6093 {
6094         switch (stringset) {
6095         case ETH_SS_STATS:
6096                 memcpy(buf, bnx2_stats_str_arr,
6097                         sizeof(bnx2_stats_str_arr));
6098                 break;
6099         case ETH_SS_TEST:
6100                 memcpy(buf, bnx2_tests_str_arr,
6101                         sizeof(bnx2_tests_str_arr));
6102                 break;
6103         }
6104 }
6105
6106 static void
6107 bnx2_get_ethtool_stats(struct net_device *dev,
6108                 struct ethtool_stats *stats, u64 *buf)
6109 {
6110         struct bnx2 *bp = netdev_priv(dev);
6111         int i;
6112         u32 *hw_stats = (u32 *) bp->stats_blk;
6113         u8 *stats_len_arr = NULL;
6114
6115         if (hw_stats == NULL) {
6116                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6117                 return;
6118         }
6119
6120         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6121             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6122             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6123             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6124                 stats_len_arr = bnx2_5706_stats_len_arr;
6125         else
6126                 stats_len_arr = bnx2_5708_stats_len_arr;
6127
6128         for (i = 0; i < BNX2_NUM_STATS; i++) {
6129                 if (stats_len_arr[i] == 0) {
6130                         /* skip this counter */
6131                         buf[i] = 0;
6132                         continue;
6133                 }
6134                 if (stats_len_arr[i] == 4) {